From 3603a706d079d64c37d01884ab5ad00474d7e954 Mon Sep 17 00:00:00 2001 From: Fokko Driesprong Date: Fri, 29 Sep 2023 11:16:26 +0200 Subject: [PATCH] Python: Migrate from `iceberg` to `iceberg-python` --- .github/ISSUE_TEMPLATE/iceberg_bug_report.yml | 28 + .../ISSUE_TEMPLATE/iceberg_improvement.yml | 11 + .github/ISSUE_TEMPLATE/iceberg_question.yml | 14 + .github/dependabot.yml | 32 + .github/workflows/python-ci-docs.yml | 56 + .github/workflows/python-ci.yml | 51 + .github/workflows/python-integration.yml | 44 + .github/workflows/python-release.yml | 88 + .github/workflows/stale.yml | 49 + .gitignore | 52 + .pre-commit-config.yaml | 73 + LICENSE | 235 + MANIFEST.in | 18 + Makefile | 70 + NOTICE | 8 + README.md | 26 + build-module.py | 71 + dev/.rat-excludes | 2 + dev/Dockerfile | 75 + dev/check-license | 86 + dev/docker-compose-azurite.yml | 26 + dev/docker-compose-gcs-server.yml | 30 + dev/docker-compose-integration.yml | 89 + dev/docker-compose.yml | 47 + dev/entrypoint.sh | 25 + dev/provision.py | 281 + dev/run-azurite.sh | 33 + dev/run-gcs-server.sh | 33 + dev/run-minio.sh | 33 + dev/spark-defaults.conf | 29 + mkdocs/README.md | 28 + mkdocs/docs/SUMMARY.md | 31 + mkdocs/docs/api.md | 424 + .../docs/assets/images/iceberg-logo-icon.png | Bin 0 -> 17608 bytes mkdocs/docs/cli.md | 220 + mkdocs/docs/configuration.md | 200 + mkdocs/docs/contributing.md | 163 + mkdocs/docs/feature-support.md | 72 + mkdocs/docs/how-to-release.md | 160 + mkdocs/docs/index.md | 73 + mkdocs/docs/verify-release.md | 110 + mkdocs/gen_doc_stubs.py | 55 + mkdocs/mkdocs.yml | 60 + mkdocs/requirements.txt | 28 + poetry.lock | 3588 + pyiceberg/__init__.py | 18 + pyiceberg/avro/__init__.py | 23 + pyiceberg/avro/codecs/__init__.py | 42 + pyiceberg/avro/codecs/bzip2.py | 43 + pyiceberg/avro/codecs/codec.py | 33 + pyiceberg/avro/codecs/deflate.py | 36 + pyiceberg/avro/codecs/snappy_codec.py | 69 + pyiceberg/avro/codecs/zstandard_codec.py | 53 + pyiceberg/avro/decoder.py | 186 + pyiceberg/avro/decoder_basic.c | 65 + pyiceberg/avro/decoder_fast.pyi | 56 + pyiceberg/avro/decoder_fast.pyx | 182 + pyiceberg/avro/encoder.py | 75 + pyiceberg/avro/file.py | 276 + pyiceberg/avro/reader.py | 492 + pyiceberg/avro/resolver.py | 397 + pyiceberg/avro/writer.py | 203 + pyiceberg/catalog/__init__.py | 606 + pyiceberg/catalog/dynamodb.py | 796 + pyiceberg/catalog/glue.py | 498 + pyiceberg/catalog/hive.py | 544 + pyiceberg/catalog/noop.py | 94 + pyiceberg/catalog/rest.py | 625 + pyiceberg/catalog/sql.py | 515 + pyiceberg/cli/__init__.py | 16 + pyiceberg/cli/console.py | 374 + pyiceberg/cli/output.py | 228 + pyiceberg/conversions.py | 324 + pyiceberg/exceptions.py | 112 + pyiceberg/expressions/__init__.py | 899 + pyiceberg/expressions/literals.py | 673 + pyiceberg/expressions/parser.py | 255 + pyiceberg/expressions/visitors.py | 1419 + pyiceberg/io/__init__.py | 346 + pyiceberg/io/fsspec.py | 334 + pyiceberg/io/pyarrow.py | 1528 + pyiceberg/manifest.py | 861 + pyiceberg/partitioning.py | 217 + pyiceberg/py.typed | 18 + pyiceberg/schema.py | 1497 + pyiceberg/serializers.py | 131 + pyiceberg/table/__init__.py | 1568 + pyiceberg/table/metadata.py | 447 + pyiceberg/table/refs.py | 41 + pyiceberg/table/snapshots.py | 118 + pyiceberg/table/sorting.py | 192 + pyiceberg/transforms.py | 840 + pyiceberg/typedef.py | 199 + pyiceberg/types.py | 708 + pyiceberg/utils/__init__.py | 16 + pyiceberg/utils/bin_packing.py | 106 + pyiceberg/utils/concurrent.py | 48 + pyiceberg/utils/config.py | 156 + pyiceberg/utils/datetime.py | 185 + 
pyiceberg/utils/decimal.py | 127 + pyiceberg/utils/deprecated.py | 45 + pyiceberg/utils/lazydict.py | 68 + pyiceberg/utils/parsing.py | 37 + pyiceberg/utils/schema_conversion.py | 609 + pyiceberg/utils/singleton.py | 48 + pyiceberg/utils/truncate.py | 48 + pyproject.toml | 354 + tests/avro/test_decoder.py | 207 + tests/avro/test_encoder.py | 134 + tests/avro/test_file.py | 289 + tests/avro/test_reader.py | 385 + tests/avro/test_resolver.py | 303 + tests/avro/test_writer.py | 237 + tests/catalog/integration_test_dynamodb.py | 255 + tests/catalog/integration_test_glue.py | 263 + tests/catalog/test_base.py | 605 + tests/catalog/test_dynamodb.py | 468 + tests/catalog/test_glue.py | 459 + tests/catalog/test_hive.py | 698 + tests/catalog/test_rest.py | 945 + tests/catalog/test_sql.py | 384 + tests/cli/test_console.py | 930 + tests/cli/test_output.py | 16 + tests/conftest.py | 1661 + tests/expressions/test_evaluator.py | 927 + tests/expressions/test_expressions.py | 1161 + tests/expressions/test_literals.py | 897 + tests/expressions/test_parser.py | 168 + tests/expressions/test_projection.py | 378 + tests/expressions/test_visitors.py | 1631 + tests/io/test_fsspec.py | 655 + tests/io/test_io.py | 313 + tests/io/test_pyarrow.py | 1544 + tests/io/test_pyarrow_stats.py | 798 + tests/io/test_pyarrow_visitor.py | 271 + tests/table/test_init.py | 508 + tests/table/test_metadata.py | 713 + tests/table/test_partitioning.py | 131 + tests/table/test_refs.py | 34 + tests/table/test_snapshots.py | 122 + tests/table/test_sorting.py | 97 + tests/test_conversions.py | 546 + tests/test_integration.py | 366 + tests/test_integration_schema.py | 2471 + tests/test_schema.py | 790 + tests/test_transforms.py | 932 + tests/test_typedef.py | 89 + tests/test_types.py | 615 + tests/test_version.py | 28 + tests/utils/test_bin_packing.py | 86 + tests/utils/test_concurrent.py | 52 + tests/utils/test_config.py | 56 + tests/utils/test_datetime.py | 73 + tests/utils/test_decimal.py | 40 + tests/utils/test_deprecated.py | 37 + tests/utils/test_lazydict.py | 31 + tests/utils/test_manifest.py | 280 + tests/utils/test_schema_conversion.py | 370 + tests/utils/test_singleton.py | 30 + tests/utils/test_truncate.py | 27 + vendor/README.md | 45 + vendor/fb303/FacebookService.py | 2420 + vendor/fb303/__init__.py | 18 + vendor/fb303/constants.py | 26 + vendor/fb303/ttypes.py | 64 + vendor/hive_metastore/ThriftHiveMetastore.py | 72960 ++++++++++++++++ vendor/hive_metastore/__init__.py | 17 + vendor/hive_metastore/constants.py | 66 + vendor/hive_metastore/ttypes.py | 42515 +++++++++ 169 files changed, 171683 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/iceberg_bug_report.yml create mode 100644 .github/ISSUE_TEMPLATE/iceberg_improvement.yml create mode 100644 .github/ISSUE_TEMPLATE/iceberg_question.yml create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/python-ci-docs.yml create mode 100644 .github/workflows/python-ci.yml create mode 100644 .github/workflows/python-integration.yml create mode 100644 .github/workflows/python-release.yml create mode 100644 .github/workflows/stale.yml create mode 100644 .gitignore create mode 100644 .pre-commit-config.yaml create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 Makefile create mode 100644 NOTICE create mode 100644 README.md create mode 100644 build-module.py create mode 100644 dev/.rat-excludes create mode 100644 dev/Dockerfile create mode 100755 dev/check-license create mode 100644 dev/docker-compose-azurite.yml create mode 100644 
dev/docker-compose-gcs-server.yml create mode 100644 dev/docker-compose-integration.yml create mode 100644 dev/docker-compose.yml create mode 100755 dev/entrypoint.sh create mode 100644 dev/provision.py create mode 100755 dev/run-azurite.sh create mode 100644 dev/run-gcs-server.sh create mode 100755 dev/run-minio.sh create mode 100644 dev/spark-defaults.conf create mode 100644 mkdocs/README.md create mode 100644 mkdocs/docs/SUMMARY.md create mode 100644 mkdocs/docs/api.md create mode 100644 mkdocs/docs/assets/images/iceberg-logo-icon.png create mode 100644 mkdocs/docs/cli.md create mode 100644 mkdocs/docs/configuration.md create mode 100644 mkdocs/docs/contributing.md create mode 100644 mkdocs/docs/feature-support.md create mode 100644 mkdocs/docs/how-to-release.md create mode 100644 mkdocs/docs/index.md create mode 100644 mkdocs/docs/verify-release.md create mode 100644 mkdocs/gen_doc_stubs.py create mode 100644 mkdocs/mkdocs.yml create mode 100644 mkdocs/requirements.txt create mode 100644 poetry.lock create mode 100644 pyiceberg/__init__.py create mode 100644 pyiceberg/avro/__init__.py create mode 100644 pyiceberg/avro/codecs/__init__.py create mode 100644 pyiceberg/avro/codecs/bzip2.py create mode 100644 pyiceberg/avro/codecs/codec.py create mode 100644 pyiceberg/avro/codecs/deflate.py create mode 100644 pyiceberg/avro/codecs/snappy_codec.py create mode 100644 pyiceberg/avro/codecs/zstandard_codec.py create mode 100644 pyiceberg/avro/decoder.py create mode 100644 pyiceberg/avro/decoder_basic.c create mode 100644 pyiceberg/avro/decoder_fast.pyi create mode 100644 pyiceberg/avro/decoder_fast.pyx create mode 100644 pyiceberg/avro/encoder.py create mode 100644 pyiceberg/avro/file.py create mode 100644 pyiceberg/avro/reader.py create mode 100644 pyiceberg/avro/resolver.py create mode 100644 pyiceberg/avro/writer.py create mode 100644 pyiceberg/catalog/__init__.py create mode 100644 pyiceberg/catalog/dynamodb.py create mode 100644 pyiceberg/catalog/glue.py create mode 100644 pyiceberg/catalog/hive.py create mode 100644 pyiceberg/catalog/noop.py create mode 100644 pyiceberg/catalog/rest.py create mode 100644 pyiceberg/catalog/sql.py create mode 100644 pyiceberg/cli/__init__.py create mode 100644 pyiceberg/cli/console.py create mode 100644 pyiceberg/cli/output.py create mode 100644 pyiceberg/conversions.py create mode 100644 pyiceberg/exceptions.py create mode 100644 pyiceberg/expressions/__init__.py create mode 100644 pyiceberg/expressions/literals.py create mode 100644 pyiceberg/expressions/parser.py create mode 100644 pyiceberg/expressions/visitors.py create mode 100644 pyiceberg/io/__init__.py create mode 100644 pyiceberg/io/fsspec.py create mode 100644 pyiceberg/io/pyarrow.py create mode 100644 pyiceberg/manifest.py create mode 100644 pyiceberg/partitioning.py create mode 100644 pyiceberg/py.typed create mode 100644 pyiceberg/schema.py create mode 100644 pyiceberg/serializers.py create mode 100644 pyiceberg/table/__init__.py create mode 100644 pyiceberg/table/metadata.py create mode 100644 pyiceberg/table/refs.py create mode 100644 pyiceberg/table/snapshots.py create mode 100644 pyiceberg/table/sorting.py create mode 100644 pyiceberg/transforms.py create mode 100644 pyiceberg/typedef.py create mode 100644 pyiceberg/types.py create mode 100644 pyiceberg/utils/__init__.py create mode 100644 pyiceberg/utils/bin_packing.py create mode 100644 pyiceberg/utils/concurrent.py create mode 100644 pyiceberg/utils/config.py create mode 100644 pyiceberg/utils/datetime.py create mode 100644 
pyiceberg/utils/decimal.py create mode 100644 pyiceberg/utils/deprecated.py create mode 100644 pyiceberg/utils/lazydict.py create mode 100644 pyiceberg/utils/parsing.py create mode 100644 pyiceberg/utils/schema_conversion.py create mode 100644 pyiceberg/utils/singleton.py create mode 100644 pyiceberg/utils/truncate.py create mode 100644 pyproject.toml create mode 100644 tests/avro/test_decoder.py create mode 100644 tests/avro/test_encoder.py create mode 100644 tests/avro/test_file.py create mode 100644 tests/avro/test_reader.py create mode 100644 tests/avro/test_resolver.py create mode 100644 tests/avro/test_writer.py create mode 100644 tests/catalog/integration_test_dynamodb.py create mode 100644 tests/catalog/integration_test_glue.py create mode 100644 tests/catalog/test_base.py create mode 100644 tests/catalog/test_dynamodb.py create mode 100644 tests/catalog/test_glue.py create mode 100644 tests/catalog/test_hive.py create mode 100644 tests/catalog/test_rest.py create mode 100644 tests/catalog/test_sql.py create mode 100644 tests/cli/test_console.py create mode 100644 tests/cli/test_output.py create mode 100644 tests/conftest.py create mode 100644 tests/expressions/test_evaluator.py create mode 100644 tests/expressions/test_expressions.py create mode 100644 tests/expressions/test_literals.py create mode 100644 tests/expressions/test_parser.py create mode 100644 tests/expressions/test_projection.py create mode 100644 tests/expressions/test_visitors.py create mode 100644 tests/io/test_fsspec.py create mode 100644 tests/io/test_io.py create mode 100644 tests/io/test_pyarrow.py create mode 100644 tests/io/test_pyarrow_stats.py create mode 100644 tests/io/test_pyarrow_visitor.py create mode 100644 tests/table/test_init.py create mode 100644 tests/table/test_metadata.py create mode 100644 tests/table/test_partitioning.py create mode 100644 tests/table/test_refs.py create mode 100644 tests/table/test_snapshots.py create mode 100644 tests/table/test_sorting.py create mode 100644 tests/test_conversions.py create mode 100644 tests/test_integration.py create mode 100644 tests/test_integration_schema.py create mode 100644 tests/test_schema.py create mode 100644 tests/test_transforms.py create mode 100644 tests/test_typedef.py create mode 100644 tests/test_types.py create mode 100644 tests/test_version.py create mode 100644 tests/utils/test_bin_packing.py create mode 100644 tests/utils/test_concurrent.py create mode 100644 tests/utils/test_config.py create mode 100644 tests/utils/test_datetime.py create mode 100644 tests/utils/test_decimal.py create mode 100644 tests/utils/test_deprecated.py create mode 100644 tests/utils/test_lazydict.py create mode 100644 tests/utils/test_manifest.py create mode 100644 tests/utils/test_schema_conversion.py create mode 100644 tests/utils/test_singleton.py create mode 100644 tests/utils/test_truncate.py create mode 100644 vendor/README.md create mode 100644 vendor/fb303/FacebookService.py create mode 100644 vendor/fb303/__init__.py create mode 100644 vendor/fb303/constants.py create mode 100644 vendor/fb303/ttypes.py create mode 100644 vendor/hive_metastore/ThriftHiveMetastore.py create mode 100644 vendor/hive_metastore/__init__.py create mode 100644 vendor/hive_metastore/constants.py create mode 100644 vendor/hive_metastore/ttypes.py diff --git a/.github/ISSUE_TEMPLATE/iceberg_bug_report.yml b/.github/ISSUE_TEMPLATE/iceberg_bug_report.yml new file mode 100644 index 0000000000..b56cba8555 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/iceberg_bug_report.yml @@ -0,0 
+1,28 @@ +--- +name: Iceberg Bug report 🐞 +description: Problems, bugs and issues with Apache PyIceberg +labels: ["kind:bug"] +body: + - type: dropdown + attributes: + label: PyIceberg version + description: What PyIceberg version are you using? + multiple: false + options: + - "0.1.0" + - "0.2.0" + - "0.3.0" + - "0.4.0" + - "0.5.0" + - "main (development)" + validations: + required: false + - type: textarea + attributes: + label: Please describe the bug 🐞 + description: > + Please describe the problem, what to expect, and how to reproduce. + Feel free to include stacktraces and the Iceberg catalog configuration. + You can include files by dragging and dropping them here. + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/iceberg_improvement.yml b/.github/ISSUE_TEMPLATE/iceberg_improvement.yml new file mode 100644 index 0000000000..d61406efef --- /dev/null +++ b/.github/ISSUE_TEMPLATE/iceberg_improvement.yml @@ -0,0 +1,11 @@ +--- +name: Iceberg Improvement / Feature Request +description: New features with Apache PyIceberg +labels: ["kind:feature request"] +body: + - type: textarea + attributes: + label: Feature Request / Improvement + description: Please describe the feature and elaborate on the use case and motivation behind it + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/iceberg_question.yml b/.github/ISSUE_TEMPLATE/iceberg_question.yml new file mode 100644 index 0000000000..68bc1bc478 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/iceberg_question.yml @@ -0,0 +1,14 @@ +--- +name: Iceberg Question +description: Questions around Apache PyIceberg +labels: ["kind:question"] +body: + - type: markdown + attributes: + value: "Feel free to ask your question on [Slack](https://join.slack.com/t/apache-iceberg/shared_invite/zt-1znkcg5zm-7_FE~pcox347XwZE3GNfPg) as well." + - type: textarea + attributes: + label: Question + description: What is your question? + validations: + required: true diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..8ee1223af2 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,32 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +version: 2 +updates: + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "weekly" + day: "sunday" + open-pull-requests-limit: 5 + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + day: "sunday" diff --git a/.github/workflows/python-ci-docs.yml b/.github/workflows/python-ci-docs.yml new file mode 100644 index 0000000000..fc65fc9545 --- /dev/null +++ b/.github/workflows/python-ci-docs.yml @@ -0,0 +1,56 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements.
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +name: "Python Docs" +on: + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +jobs: + docs: + runs-on: ubuntu-22.04 + + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v4 + with: + # This job defines no build matrix, so pin a single Python version + python-version: '3.9' + - name: Install + working-directory: ./mkdocs + run: pip install -r requirements.txt + - name: Build + working-directory: ./mkdocs + run: mkdocs build --strict + - name: Copy + working-directory: ./mkdocs + run: mv ./site /tmp/site + - name: Push changes to gh-pages branch + run: | + git checkout --orphan gh-pages-tmp + git rm --quiet -rf . + cp -r /tmp/site/* . + git config --global user.name 'GitHub Actions' + git config --global user.email 'actions@github.com' + echo "py.iceberg.apache.org" > CNAME + git add --all + git commit -m 'Publish Python docs' + git push -f origin gh-pages-tmp:gh-pages || true diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml new file mode 100644 index 0000000000..86bce01a20 --- /dev/null +++ b/.github/workflows/python-ci.yml @@ -0,0 +1,51 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.
+# + +name: "Python CI" +on: + push: + branches: + - 'main' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +jobs: + lint-and-test: + runs-on: ubuntu-22.04 + strategy: + matrix: + python: ['3.8', '3.9', '3.10', '3.11'] + + steps: + - uses: actions/checkout@v4 + - name: Install poetry + run: make install-poetry + - uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python }} + cache: poetry + cache-dependency-path: poetry.lock + - name: Install + run: make install-dependencies + - name: Linters + run: make lint + - name: Tests + run: make test-coverage diff --git a/.github/workflows/python-integration.yml b/.github/workflows/python-integration.yml new file mode 100644 index 0000000000..c036d7abb4 --- /dev/null +++ b/.github/workflows/python-integration.yml @@ -0,0 +1,44 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +name: "Python Integration" +on: + push: + branches: + - 'main' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +jobs: + integration-test: + runs-on: ubuntu-20.04 + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 2 + - name: Install + run: make install + - name: Run integration tests + run: make test-integration + - name: Show debug logs + if: ${{ failure() }} + run: docker-compose -f python/dev/docker-compose.yml logs diff --git a/.github/workflows/python-release.yml b/.github/workflows/python-release.yml new file mode 100644 index 0000000000..6902775b13 --- /dev/null +++ b/.github/workflows/python-release.yml @@ -0,0 +1,88 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +name: "Python Release" + +on: + workflow_dispatch: + inputs: + version: + description: 'Version' + type: string + default: 'master' + + +jobs: + build_wheels: + name: Build wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ ubuntu-22.04, windows-2022, macos-11 ] + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/setup-python@v4 + with: + python-version: '3.8' + + - name: Install poetry + run: pip install poetry + + - name: Set version + run: python -m poetry version "${{ inputs.version }}" + working-directory: ./python + if: "${{ github.event.inputs.version != 'master' }}" + + # Publish the source distribution with the version that's in + # the repository, otherwise the tests will fail + - name: Compile source distribution + run: python3 -m poetry build --format=sdist + if: startsWith(matrix.os, 'ubuntu') + working-directory: ./python + + - name: Build wheels + uses: pypa/cibuildwheel@v2.16.0 + with: + output-dir: wheelhouse + config-file: "pyproject.toml" + env: + # Ignore 32 bit architectures + CIBW_ARCHS: "auto64" + CIBW_PROJECT_REQUIRES_PYTHON: ">=3.8,<3.12" + CIBW_TEST_REQUIRES: "pytest==7.4.2 moto==4.2.2" + CIBW_TEST_EXTRAS: "s3fs,glue" + CIBW_TEST_COMMAND: "pytest -Werror {project}/tests/avro/test_decoder.py" + # There is an upstream issue with installing on MacOSX + # https://github.com/pypa/cibuildwheel/issues/1603 + # Ignore tests for pypy since not all dependencies are compiled for it + # and would require a local rust build chain + CIBW_TEST_SKIP: "pp* *macosx*" + + - name: Add source distribution + if: startsWith(matrix.os, 'ubuntu') + run: ls -lah dist/* && cp dist/* wheelhouse/ + + - uses: actions/upload-artifact@v3 + with: + name: "release-${{ github.event.inputs.version }}" + path: ./wheelhouse/* diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000000..566ae2441e --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,49 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +name: "Close Stale Issues" +on: + schedule: + - cron: '0 0 * * *' + +permissions: + # All other permissions are set to none + issues: write + +jobs: + stale: + if: github.repository_owner == 'apache' + runs-on: ubuntu-22.04 + steps: + - uses: actions/stale@v8.0.0 + with: + stale-issue-label: 'stale' + exempt-issue-labels: 'not-stale' + days-before-issue-stale: 180 + days-before-issue-close: 14 + # Only close stale issues, leave PRs alone + days-before-pr-stale: -1 + stale-issue-message: > + This issue has been automatically marked as stale because it has been open for 180 days + with no activity. It will be closed in next 14 days if no further activity occurs. 
To + permanently prevent this issue from being considered stale, add the label 'not-stale', + but commenting on the issue is preferred when possible. + close-issue-message: > + This issue has been closed because it has not received any activity in the last 14 days + since being marked as 'stale'. diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..7043f0e7d4 --- /dev/null +++ b/.gitignore @@ -0,0 +1,52 @@ +*.swp +.DS_Store +.cache +tmp/ +site + +# intellij files +.idea +.idea_modules/ +*.ipr +*.iws +*.iml +out + +# rat library install location +lib/ + +__pycache__/ +*.py[cod] +.eggs/ +.tox/ +env/ +venv/ +*.egg-info/ +test-reports +build/ +dist/ +sdist/ +.coverage +coverage.xml +.pytest_cache/ + +# vscode/eclipse files +.classpath +.project +.settings +bin/ + +# Hive/metastore files +metastore_db/ + +# Spark/metastore files +spark-warehouse/ +derby.log + +# Python stuff +.mypy_cache/ +htmlcov + +pyiceberg/avro/decoder_fast.c +pyiceberg/avro/*.html +pyiceberg/avro/*.so diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..91e92912d9 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,73 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.
+--- +exclude: ^vendor/ + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-docstring-first + - id: debug-statements + - id: check-yaml + - id: check-ast + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version (Used for linting) + rev: v0.0.291 + hooks: + - id: ruff + args: [ --fix, --exit-non-zero-on-fix ] + - repo: https://github.com/psf/black + rev: 23.9.1 + hooks: + - id: black + args: [ --skip-string-normalization ] + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.5.1 + hooks: + - id: mypy + args: + [ --install-types, --non-interactive, --config=pyproject.toml ] + - repo: https://github.com/hadialqattan/pycln + rev: v2.2.2 + hooks: + - id: pycln + args: [ --config=pyproject.toml ] + - repo: https://github.com/executablebooks/mdformat + rev: 0.7.17 + hooks: + - id: mdformat + additional_dependencies: + - mdformat-black==0.1.1 + - mdformat-config==0.1.3 + - mdformat-beautysh==0.1.1 + - mdformat-admon==1.0.1 + - mdformat-mkdocs==1.0.1 + - mdformat-frontmatter==2.0.1 + - repo: https://github.com/pycqa/pydocstyle + rev: 6.3.0 + hooks: + - id: pydocstyle + args: + [ + "--ignore=D100,D102,D101,D103,D104,D107,D203,D212,D213,D404,D405,D406,D407,D411,D413,D415,D417", + ] + additional_dependencies: + - tomli==2.0.1 diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..ffdd12aad2 --- /dev/null +++ b/LICENSE @@ -0,0 +1,235 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship.
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- + +This product includes code from Apache Avro. + +* Code for initializing the Avro (de)compression codecs +* The Binary decoder for reading in an Avro byte stream + +Copyright: 2014-2022 The Apache Software Foundation. +Home page: https://avro.apache.org/ +License: https://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This product includes code from Apache Thrift. + +* Uses the fb303.thrift file that's part of Hive's thrift service in vendor/fb303/ + +Copyright: 2006-2022 The Apache Software Foundation. +Home page: https://thrift.apache.org/ +License: https://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This product includes code from Apache Hive. 
+ +* Uses hive_metastore.thrift to generate the Hive Metastore client in vendor/hive_metastore/ + +Copyright: 2008-2022 The Apache Software Foundation. +Home page: https://hive.apache.org/ +License: https://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000..a31dc633ad --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +graft pyiceberg diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..80e6f4dee7 --- /dev/null +++ b/Makefile @@ -0,0 +1,70 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.
+ +install-poetry: + pip install poetry==1.6.1 + +install-dependencies: + poetry install -E pyarrow -E hive -E s3fs -E glue -E adlfs -E duckdb -E ray -E sql-postgres -E gcsfs + +install: | install-poetry install-dependencies + +check-license: + ./dev/check-license + +lint: + poetry run pre-commit run --all-files + +test: + poetry run pytest tests/ -m "(unmarked or parametrize) and not integration" ${PYTEST_ARGS} + +test-s3: + sh ./dev/run-minio.sh + poetry run pytest tests/ -m s3 ${PYTEST_ARGS} + +test-integration: + docker-compose -f dev/docker-compose-integration.yml kill + docker-compose -f dev/docker-compose-integration.yml rm -f + docker-compose -f dev/docker-compose-integration.yml up -d + sleep 10 + docker-compose -f dev/docker-compose-integration.yml exec -T spark-iceberg ipython ./provision.py + poetry run pytest tests/ -v -m integration ${PYTEST_ARGS} + +test-integration-rebuild: + docker-compose -f dev/docker-compose-integration.yml kill + docker-compose -f dev/docker-compose-integration.yml rm -f + docker-compose -f dev/docker-compose-integration.yml build --no-cache + +test-adlfs: + sh ./dev/run-azurite.sh + poetry run pytest tests/ -m adlfs ${PYTEST_ARGS} + +test-gcs: + sh ./dev/run-gcs-server.sh + poetry run pytest tests/ -m gcs ${PYTEST_ARGS} + +test-coverage: + docker-compose -f dev/docker-compose-integration.yml kill + docker-compose -f dev/docker-compose-integration.yml rm -f + docker-compose -f dev/docker-compose-integration.yml up -d + sh ./dev/run-azurite.sh + sh ./dev/run-gcs-server.sh + docker-compose -f dev/docker-compose-integration.yml exec -T spark-iceberg ipython ./provision.py + poetry run coverage run --source=pyiceberg/ -m pytest tests/ ${PYTEST_ARGS} + poetry run coverage report -m --fail-under=90 + poetry run coverage html + poetry run coverage xml diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000000..d7a3c57526 --- /dev/null +++ b/NOTICE @@ -0,0 +1,8 @@ + +Apache Iceberg +Copyright 2017-2022 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +-------------------------------------------------------------------------------- diff --git a/README.md b/README.md new file mode 100644 index 0000000000..9d093ec1a6 --- /dev/null +++ b/README.md @@ -0,0 +1,26 @@ + + +# Apache PyIceberg + +PyIceberg is a Python library for programmatic access to Iceberg table metadata as well as to table data in Iceberg format. It is a Python implementation of the [Iceberg table spec](https://iceberg.apache.org/spec/). + +The documentation is available at [https://py.iceberg.apache.org/](https://py.iceberg.apache.org/). + +# Get in Touch + +- [Iceberg community](https://iceberg.apache.org/community/) diff --git a/build-module.py b/build-module.py new file mode 100644 index 0000000000..d91375e8e6 --- /dev/null +++ b/build-module.py @@ -0,0 +1,71 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os +import shutil +from pathlib import Path + +allowed_to_fail = os.environ.get("CIBUILDWHEEL", "0") != "1" + + +def build_cython_extensions() -> None: + import Cython.Compiler.Options + from Cython.Build import build_ext, cythonize + from setuptools import Extension + from setuptools.dist import Distribution + + Cython.Compiler.Options.annotate = True + + if os.name == "nt": # Windows + extra_compile_args = [ + "/O2", + ] + else: # UNIX-based systems + extra_compile_args = [ + "-O3", + ] + + package_path = "pyiceberg" + + extension = Extension( + # Your .pyx file will be available to cpython at this location. + name="pyiceberg.avro.decoder_fast", + sources=[ + os.path.join(package_path, "avro", "decoder_fast.pyx"), + ], + extra_compile_args=extra_compile_args, + language="c", + ) + + # include_path expects a list of directories; list("pyiceberg") would splat the string into characters + ext_modules = cythonize([extension], include_path=[package_path], language_level=3, annotate=True) + dist = Distribution({"ext_modules": ext_modules}) + cmd = build_ext(dist) + cmd.ensure_finalized() + + cmd.run() + + for output in cmd.get_outputs(): + output = Path(output) + relative_extension = output.relative_to(cmd.build_lib) + shutil.copyfile(output, relative_extension) + + +try: + build_cython_extensions() +except Exception: + if not allowed_to_fail: + raise diff --git a/dev/.rat-excludes b/dev/.rat-excludes new file mode 100644 index 0000000000..fb90e7ed30 --- /dev/null +++ b/dev/.rat-excludes @@ -0,0 +1,2 @@ +.rat-excludes +build diff --git a/dev/Dockerfile b/dev/Dockerfile new file mode 100644 index 0000000000..a4099d3494 --- /dev/null +++ b/dev/Dockerfile @@ -0,0 +1,75 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +FROM python:3.9-bullseye + +RUN apt-get -qq update && \ + apt-get -qq install -y --no-install-recommends \ + sudo \ + curl \ + vim \ + unzip \ + openjdk-11-jdk \ + build-essential \ + software-properties-common \ + ssh && \ + apt-get -qq clean && \ + rm -rf /var/lib/apt/lists/* + +# Optional env variables +ENV SPARK_HOME=${SPARK_HOME:-"/opt/spark"} +ENV HADOOP_HOME=${HADOOP_HOME:-"/opt/hadoop"} +ENV PYTHONPATH=$SPARK_HOME/python:$SPARK_HOME/python/lib/py4j-0.10.9.7-src.zip:$PYTHONPATH + +RUN mkdir -p ${HADOOP_HOME} && mkdir -p ${SPARK_HOME} && mkdir -p /home/iceberg/spark-events +WORKDIR ${SPARK_HOME} + +ENV SPARK_VERSION=3.4.1 +ENV ICEBERG_SPARK_RUNTIME_VERSION=3.4_2.12 +ENV ICEBERG_VERSION=1.3.1 +ENV AWS_SDK_VERSION=2.20.18 +ENV PYICEBERG_VERSION=0.4.0 + +RUN curl --retry 3 -s -C - https://dlcdn.apache.org/spark/spark-${SPARK_VERSION}/spark-${SPARK_VERSION}-bin-hadoop3.tgz -o spark-${SPARK_VERSION}-bin-hadoop3.tgz \ + && tar xzf spark-${SPARK_VERSION}-bin-hadoop3.tgz --directory /opt/spark --strip-components 1 \ + && rm -rf spark-${SPARK_VERSION}-bin-hadoop3.tgz + +# Download iceberg spark runtime +RUN curl -s https://repo1.maven.org/maven2/org/apache/iceberg/iceberg-spark-runtime-${ICEBERG_SPARK_RUNTIME_VERSION}/${ICEBERG_VERSION}/iceberg-spark-runtime-${ICEBERG_SPARK_RUNTIME_VERSION}-${ICEBERG_VERSION}.jar -Lo iceberg-spark-runtime-${ICEBERG_SPARK_RUNTIME_VERSION}-${ICEBERG_VERSION}.jar \ + && mv iceberg-spark-runtime-${ICEBERG_SPARK_RUNTIME_VERSION}-${ICEBERG_VERSION}.jar /opt/spark/jars + +# Download Java AWS SDK +RUN curl -s https://repo1.maven.org/maven2/software/amazon/awssdk/bundle/${AWS_SDK_VERSION}/bundle-${AWS_SDK_VERSION}.jar -Lo bundle-${AWS_SDK_VERSION}.jar \ + && mv bundle-${AWS_SDK_VERSION}.jar /opt/spark/jars + +# Download URL connection client required for S3FileIO +RUN curl -s https://repo1.maven.org/maven2/software/amazon/awssdk/url-connection-client/${AWS_SDK_VERSION}/url-connection-client-${AWS_SDK_VERSION}.jar -Lo url-connection-client-${AWS_SDK_VERSION}.jar \ + && mv url-connection-client-${AWS_SDK_VERSION}.jar /opt/spark/jars + +COPY spark-defaults.conf /opt/spark/conf +ENV PATH="/opt/spark/sbin:/opt/spark/bin:${PATH}" + +RUN chmod u+x /opt/spark/sbin/* && \ + chmod u+x /opt/spark/bin/* + +RUN pip3 install -q ipython + +RUN pip3 install "pyiceberg[s3fs]==${PYICEBERG_VERSION}" + +COPY entrypoint.sh . +COPY provision.py . + +ENTRYPOINT ["./entrypoint.sh"] +CMD ["notebook"] diff --git a/dev/check-license b/dev/check-license new file mode 100755 index 0000000000..6b1a9dfff2 --- /dev/null +++ b/dev/check-license @@ -0,0 +1,86 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +acquire_rat_jar () { + + URL="https://repo.maven.apache.org/maven2/org/apache/rat/apache-rat/${RAT_VERSION}/apache-rat-${RAT_VERSION}.jar" + + JAR="$rat_jar" + + # Download rat launch jar if it hasn't been downloaded yet + if [ ! -f "$JAR" ]; then + # Download + printf "Attempting to fetch rat\n" + JAR_DL="${JAR}.part" + if [ $(command -v curl) ]; then + curl -L --silent "${URL}" > "$JAR_DL" && mv "$JAR_DL" "$JAR" + elif [ $(command -v wget) ]; then + wget --quiet ${URL} -O "$JAR_DL" && mv "$JAR_DL" "$JAR" + else + printf "You do not have curl or wget installed, please install rat manually.\n" + exit 1 + fi + fi + + unzip -tq "$JAR" &> /dev/null + if [ $? -ne 0 ]; then + # We failed to download + rm "$JAR" + printf "Our attempt to download rat locally to ${JAR} failed. Please install rat manually.\n" + exit 1 + fi +} + +# Go to the project root directory +FWDIR="$(cd "`dirname "$0"`"/..; pwd)" +cd "$FWDIR" + +if test -x "$JAVA_HOME/bin/java"; then + declare java_cmd="$JAVA_HOME/bin/java" +else + declare java_cmd=java +fi + +export RAT_VERSION=0.15 +export rat_jar="$FWDIR"/lib/apache-rat-${RAT_VERSION}.jar +mkdir -p "$FWDIR"/lib + +[[ -f "$rat_jar" ]] || acquire_rat_jar || { + echo "Download failed. Obtain the rat jar manually and place it at $rat_jar" + exit 1 +} + +mkdir -p build +$java_cmd -jar "$rat_jar" -E "$FWDIR"/dev/.rat-excludes -d "$FWDIR" > build/rat-results.txt + +if [ $? -ne 0 ]; then + echo "RAT exited abnormally" + exit 1 +fi + +ERRORS="$(cat build/rat-results.txt | grep -e "??")" + +if test ! -z "$ERRORS"; then + echo "Could not find Apache license headers in the following files:" + echo "$ERRORS" + exit 1 +else + echo -e "RAT checks passed." +fi diff --git a/dev/docker-compose-azurite.yml b/dev/docker-compose-azurite.yml new file mode 100644 index 0000000000..9be491d896 --- /dev/null +++ b/dev/docker-compose-azurite.yml @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +version: "3" + +services: + azurite: + image: mcr.microsoft.com/azure-storage/azurite + container_name: azurite + hostname: azurite + ports: + - 10000:10000 + command: ["azurite-blob", "--loose", "--blobHost", "0.0.0.0"] diff --git a/dev/docker-compose-gcs-server.yml b/dev/docker-compose-gcs-server.yml new file mode 100644 index 0000000000..2a5164c81c --- /dev/null +++ b/dev/docker-compose-gcs-server.yml @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +version: "3" + +services: + gcs-server: + image: fsouza/fake-gcs-server + container_name: gcs-server + ports: + - 4443:4443 + entrypoint: > + /bin/sh -c " + mkdir -p /data/warehouse; + /bin/fake-gcs-server -data /data -scheme http; + exit 0; + " diff --git a/dev/docker-compose-integration.yml b/dev/docker-compose-integration.yml new file mode 100644 index 0000000000..658bd698c9 --- /dev/null +++ b/dev/docker-compose-integration.yml @@ -0,0 +1,89 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +version: "3" + +services: + spark-iceberg: + image: python-integration + container_name: pyiceberg-spark + build: . + networks: + iceberg_net: + depends_on: + - rest + - minio + volumes: + - ./warehouse:/home/iceberg/warehouse + environment: + - AWS_ACCESS_KEY_ID=admin + - AWS_SECRET_ACCESS_KEY=password + - AWS_REGION=us-east-1 + ports: + - 8888:8888 + - 8080:8080 + links: + - rest:rest + - minio:minio + rest: + image: tabulario/iceberg-rest + container_name: pyiceberg-rest + networks: + iceberg_net: + ports: + - 8181:8181 + environment: + - AWS_ACCESS_KEY_ID=admin + - AWS_SECRET_ACCESS_KEY=password + - AWS_REGION=us-east-1 + - CATALOG_WAREHOUSE=s3://warehouse/ + - CATALOG_IO__IMPL=org.apache.iceberg.aws.s3.S3FileIO + - CATALOG_S3_ENDPOINT=http://minio:9000 + minio: + image: minio/minio + container_name: pyiceberg-minio + environment: + - MINIO_ROOT_USER=admin + - MINIO_ROOT_PASSWORD=password + - MINIO_DOMAIN=minio + networks: + iceberg_net: + aliases: + - warehouse.minio + ports: + - 9001:9001 + - 9000:9000 + command: ["server", "/data", "--console-address", ":9001"] + mc: + depends_on: + - minio + image: minio/mc + container_name: pyiceberg-mc + networks: + iceberg_net: + environment: + - AWS_ACCESS_KEY_ID=admin + - AWS_SECRET_ACCESS_KEY=password + - AWS_REGION=us-east-1 + entrypoint: > + /bin/sh -c " + until (/usr/bin/mc config host add minio http://minio:9000 admin password) do echo '...waiting...' && sleep 1; done; + /usr/bin/mc mb minio/warehouse; + /usr/bin/mc policy set public minio/warehouse; + tail -f /dev/null + " +networks: + iceberg_net: diff --git a/dev/docker-compose.yml b/dev/docker-compose.yml new file mode 100644 index 0000000000..817f05b56c --- /dev/null +++ b/dev/docker-compose.yml @@ -0,0 +1,47 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +version: "3" + +services: + minio: + image: minio/minio + container_name: pyiceberg-minio + environment: + - MINIO_ROOT_USER=admin + - MINIO_ROOT_PASSWORD=password + - MINIO_DOMAIN=minio + ports: + - 9001:9001 + - 9000:9000 + command: ["server", "/data", "--console-address", ":9001"] + mc: + depends_on: + - minio + image: minio/mc + container_name: pyiceberg-mc + environment: + - AWS_ACCESS_KEY_ID=admin + - AWS_SECRET_ACCESS_KEY=password + - AWS_REGION=us-east-1 + entrypoint: > + /bin/sh -c " + until (/usr/bin/mc config host add minio http://minio:9000 admin password) do echo '...waiting...' && sleep 1; done; + /usr/bin/mc rm -r --force minio/warehouse; + /usr/bin/mc mb minio/warehouse; + /usr/bin/mc policy set public minio/warehouse; + exit 0; + " diff --git a/dev/entrypoint.sh b/dev/entrypoint.sh new file mode 100755 index 0000000000..574e876c77 --- /dev/null +++ b/dev/entrypoint.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +start-master.sh -p 7077 +start-worker.sh spark://spark-iceberg:7077 +start-history-server.sh + +tail -f /dev/null diff --git a/dev/provision.py b/dev/provision.py new file mode 100644 index 0000000000..56e3459edd --- /dev/null +++ b/dev/provision.py @@ -0,0 +1,281 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
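+
+# Provision the integration-test environment: create the `default` database and a
+# set of Iceberg tables, via Spark SQL and the PyIceberg REST catalog, that the
+# PyIceberg test suite queries against (see mkdocs/docs/contributing.md).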
+ +from pyspark.sql import SparkSession +from pyspark.sql.functions import current_date, date_add, expr + +from pyiceberg.catalog import load_catalog +from pyiceberg.schema import Schema +from pyiceberg.types import FixedType, NestedField, UUIDType + +spark = SparkSession.builder.getOrCreate() + +spark.sql( + """ + CREATE DATABASE IF NOT EXISTS default; +""" +) + +schema = Schema( + NestedField(field_id=1, name="uuid_col", field_type=UUIDType(), required=False), + NestedField(field_id=2, name="fixed_col", field_type=FixedType(25), required=False), +) + +catalog = load_catalog( + "local", + **{ + "type": "rest", + "uri": "http://rest:8181", + "s3.endpoint": "http://minio:9000", + "s3.access-key-id": "admin", + "s3.secret-access-key": "password", + }, +) + +catalog.create_table(identifier="default.test_uuid_and_fixed_unpartitioned", schema=schema) + +spark.sql( + """ + INSERT INTO default.test_uuid_and_fixed_unpartitioned VALUES + ('102cb62f-e6f8-4eb0-9973-d9b012ff0967', CAST('1234567890123456789012345' AS BINARY)), + ('ec33e4b2-a834-4cc3-8c4a-a1d3bfc2f226', CAST('1231231231231231231231231' AS BINARY)), + ('639cccce-c9d2-494a-a78c-278ab234f024', CAST('12345678901234567ass12345' AS BINARY)), + ('c1b0d8e0-0b0e-4b1e-9b0a-0e0b0d0c0a0b', CAST('asdasasdads12312312312111' AS BINARY)), + ('923dae77-83d6-47cd-b4b0-d383e64ee57e', CAST('qweeqwwqq1231231231231111' AS BINARY)); + """ +) + +spark.sql( + """ + CREATE OR REPLACE TABLE default.test_null_nan + USING iceberg + AS SELECT + 1 AS idx, + float('NaN') AS col_numeric +UNION ALL SELECT + 2 AS idx, + null AS col_numeric +UNION ALL SELECT + 3 AS idx, + 1 AS col_numeric +""" +) + +spark.sql( + """ + CREATE OR REPLACE TABLE default.test_null_nan_rewritten + USING iceberg + AS SELECT * FROM default.test_null_nan +""" +) + +spark.sql( + """ +CREATE OR REPLACE TABLE default.test_limit as + SELECT * LATERAL VIEW explode(ARRAY(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)) AS idx; +""" +) + +spark.sql( + """ +CREATE OR REPLACE TABLE default.test_positional_mor_deletes ( + dt date, + number integer, + letter string +) +USING iceberg +TBLPROPERTIES ( + 'write.delete.mode'='merge-on-read', + 'write.update.mode'='merge-on-read', + 'write.merge.mode'='merge-on-read', + 'format-version'='2' +); +""" +) + +# Partitioning is not really needed, but there is a bug: +# https://github.com/apache/iceberg/pull/7685 +spark.sql( + """ + ALTER TABLE default.test_positional_mor_deletes ADD PARTITION FIELD years(dt) AS dt_years +""" +) + +spark.sql( + """ +INSERT INTO default.test_positional_mor_deletes +VALUES + (CAST('2023-03-01' AS date), 1, 'a'), + (CAST('2023-03-02' AS date), 2, 'b'), + (CAST('2023-03-03' AS date), 3, 'c'), + (CAST('2023-03-04' AS date), 4, 'd'), + (CAST('2023-03-05' AS date), 5, 'e'), + (CAST('2023-03-06' AS date), 6, 'f'), + (CAST('2023-03-07' AS date), 7, 'g'), + (CAST('2023-03-08' AS date), 8, 'h'), + (CAST('2023-03-09' AS date), 9, 'i'), + (CAST('2023-03-10' AS date), 10, 'j'), + (CAST('2023-03-11' AS date), 11, 'k'), + (CAST('2023-03-12' AS date), 12, 'l'); +""" +) + +spark.sql( + """ +ALTER TABLE default.test_positional_mor_deletes CREATE TAG tag_12 + """ +) + +spark.sql( + """ +ALTER TABLE default.test_positional_mor_deletes CREATE BRANCH without_5 + """ +) + +spark.sql( + """ +DELETE FROM default.test_positional_mor_deletes.branch_without_5 WHERE number = 5 + """ +) + + +spark.sql( + """ +DELETE FROM default.test_positional_mor_deletes WHERE number = 9 +""" +) + +spark.sql( + """ + CREATE OR REPLACE TABLE default.test_positional_mor_double_deletes ( + dt date, + 
number integer, + letter string + ) + USING iceberg + TBLPROPERTIES ( + 'write.delete.mode'='merge-on-read', + 'write.update.mode'='merge-on-read', + 'write.merge.mode'='merge-on-read', + 'format-version'='2' + ); +""" +) + +# Partitioning is not really needed, but there is a bug: +# https://github.com/apache/iceberg/pull/7685 +spark.sql( + """ + ALTER TABLE default.test_positional_mor_double_deletes ADD PARTITION FIELD years(dt) AS dt_years +""" +) + +spark.sql( + """ +INSERT INTO default.test_positional_mor_double_deletes +VALUES + (CAST('2023-03-01' AS date), 1, 'a'), + (CAST('2023-03-02' AS date), 2, 'b'), + (CAST('2023-03-03' AS date), 3, 'c'), + (CAST('2023-03-04' AS date), 4, 'd'), + (CAST('2023-03-05' AS date), 5, 'e'), + (CAST('2023-03-06' AS date), 6, 'f'), + (CAST('2023-03-07' AS date), 7, 'g'), + (CAST('2023-03-08' AS date), 8, 'h'), + (CAST('2023-03-09' AS date), 9, 'i'), + (CAST('2023-03-10' AS date), 10, 'j'), + (CAST('2023-03-11' AS date), 11, 'k'), + (CAST('2023-03-12' AS date), 12, 'l'); +""" +) + +spark.sql( + """ + DELETE FROM default.test_positional_mor_double_deletes WHERE number = 9 +""" +) + +spark.sql( + """ + DELETE FROM default.test_positional_mor_double_deletes WHERE letter == 'f' +""" +) + +all_types_dataframe = ( + spark.range(0, 5, 1, 5) + .withColumnRenamed("id", "longCol") + .withColumn("intCol", expr("CAST(longCol AS INT)")) + .withColumn("floatCol", expr("CAST(longCol AS FLOAT)")) + .withColumn("doubleCol", expr("CAST(longCol AS DOUBLE)")) + .withColumn("dateCol", date_add(current_date(), 1)) + .withColumn("timestampCol", expr("TO_TIMESTAMP(dateCol)")) + .withColumn("stringCol", expr("CAST(dateCol AS STRING)")) + .withColumn("booleanCol", expr("longCol > 5")) + .withColumn("binaryCol", expr("CAST(longCol AS BINARY)")) + .withColumn("byteCol", expr("CAST(longCol AS BYTE)")) + .withColumn("decimalCol", expr("CAST(longCol AS DECIMAL(10, 2))")) + .withColumn("shortCol", expr("CAST(longCol AS SHORT)")) + .withColumn("mapCol", expr("MAP(longCol, decimalCol)")) + .withColumn("arrayCol", expr("ARRAY(longCol)")) + .withColumn("structCol", expr("STRUCT(mapCol, arrayCol)")) +) + +all_types_dataframe.writeTo("default.test_all_types").tableProperty("format-version", "2").partitionedBy( + "intCol" +).createOrReplace() + +for table_name, partition in [ + ("test_partitioned_by_identity", "ts"), + ("test_partitioned_by_years", "years(dt)"), + ("test_partitioned_by_months", "months(dt)"), + ("test_partitioned_by_days", "days(ts)"), + ("test_partitioned_by_hours", "hours(ts)"), + ("test_partitioned_by_truncate", "truncate(1, letter)"), + ("test_partitioned_by_bucket", "bucket(16, number)"), +]: + spark.sql( + f""" + CREATE OR REPLACE TABLE default.{table_name} ( + dt date, + ts timestamp, + number integer, + letter string + ) + USING iceberg; + """ + ) + + spark.sql(f"ALTER TABLE default.{table_name} ADD PARTITION FIELD {partition}") + + spark.sql( + f""" + INSERT INTO default.{table_name} + VALUES + (CAST('2022-03-01' AS date), CAST('2022-03-01 01:22:00' AS timestamp), 1, 'a'), + (CAST('2022-03-02' AS date), CAST('2022-03-02 02:22:00' AS timestamp), 2, 'b'), + (CAST('2022-03-03' AS date), CAST('2022-03-03 03:22:00' AS timestamp), 3, 'c'), + (CAST('2022-03-04' AS date), CAST('2022-03-04 04:22:00' AS timestamp), 4, 'd'), + (CAST('2023-03-05' AS date), CAST('2023-03-05 05:22:00' AS timestamp), 5, 'e'), + (CAST('2023-03-06' AS date), CAST('2023-03-06 06:22:00' AS timestamp), 6, 'f'), + (CAST('2023-03-07' AS date), CAST('2023-03-07 07:22:00' AS timestamp), 7, 'g'), + 
(CAST('2023-03-08' AS date), CAST('2023-03-08 08:22:00' AS timestamp), 8, 'h'), + (CAST('2023-03-09' AS date), CAST('2023-03-09 09:22:00' AS timestamp), 9, 'i'), + (CAST('2023-03-10' AS date), CAST('2023-03-10 10:22:00' AS timestamp), 10, 'j'), + (CAST('2023-03-11' AS date), CAST('2023-03-11 11:22:00' AS timestamp), 11, 'k'), + (CAST('2023-03-12' AS date), CAST('2023-03-12 12:22:00' AS timestamp), 12, 'l'); + """ + ) diff --git a/dev/run-azurite.sh b/dev/run-azurite.sh new file mode 100755 index 0000000000..c218155894 --- /dev/null +++ b/dev/run-azurite.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +set -ex + +if [ $(docker ps -q --filter "name=azurite" --filter "status=running" ) ]; then + echo "Azurite backend running" +else + docker-compose -f dev/docker-compose-azurite.yml kill + docker-compose -f dev/docker-compose-azurite.yml up -d + while [ -z $(docker ps -q --filter "name=azurite" --filter "status=running" ) ] + do + echo "Waiting for Azurite" + sleep 1 + done +fi diff --git a/dev/run-gcs-server.sh b/dev/run-gcs-server.sh new file mode 100644 index 0000000000..289d89009a --- /dev/null +++ b/dev/run-gcs-server.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +set -ex + +if [ $(docker ps -q --filter "name=gcs-server" --filter "status=running" ) ]; then + echo "Fake GCS Server running" +else + docker-compose -f dev/docker-compose-gcs-server.yml kill + docker-compose -f dev/docker-compose-gcs-server.yml up -d + while [ -z $(docker ps -q --filter "name=gcs-server" --filter "status=running" ) ] + do + echo "Waiting for Fake GCS Server" + sleep 1 + done +fi diff --git a/dev/run-minio.sh b/dev/run-minio.sh new file mode 100755 index 0000000000..0db37012e7 --- /dev/null +++ b/dev/run-minio.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +set -ex + +if [ $(docker ps -q --filter "name=pyiceberg-minio" --filter "status=running" ) ]; then + echo "Minio backend running" +else + docker-compose -f dev/docker-compose.yml kill + docker-compose -f dev/docker-compose.yml up -d + while [ -z $(docker ps -q --filter "name=pyiceberg-minio" --filter "status=running" ) ] + do + echo "Waiting for Minio" + sleep 1 + done +fi diff --git a/dev/spark-defaults.conf b/dev/spark-defaults.conf new file mode 100644 index 0000000000..28f93b15a6 --- /dev/null +++ b/dev/spark-defaults.conf @@ -0,0 +1,29 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +spark.sql.extensions org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions +spark.sql.catalog.demo org.apache.iceberg.spark.SparkCatalog +spark.sql.catalog.demo.type rest +spark.sql.catalog.demo.uri http://rest:8181 +spark.sql.catalog.demo.io-impl org.apache.iceberg.aws.s3.S3FileIO +spark.sql.catalog.demo.warehouse s3a://warehouse/wh/ +spark.sql.catalog.demo.s3.endpoint http://minio:9000 +spark.sql.defaultCatalog demo +spark.eventLog.enabled true +spark.eventLog.dir /home/iceberg/spark-events +spark.history.fs.logDirectory /home/iceberg/spark-events +spark.sql.catalogImplementation in-memory diff --git a/mkdocs/README.md b/mkdocs/README.md new file mode 100644 index 0000000000..e9e0462bee --- /dev/null +++ b/mkdocs/README.md @@ -0,0 +1,28 @@ + + +# Docs + +The pyiceberg docs are stored in `docs/`. 
+
+## Running docs locally
+
+```sh
+pip3 install -r requirements.txt
+mkdocs serve
+open http://localhost:8000/
+```
diff --git a/mkdocs/docs/SUMMARY.md b/mkdocs/docs/SUMMARY.md
new file mode 100644
index 0000000000..46383823ec
--- /dev/null
+++ b/mkdocs/docs/SUMMARY.md
@@ -0,0 +1,31 @@
+
+
+
+- [Home](index.md)
+- [Configuration](configuration.md)
+- [CLI](cli.md)
+- [API](api.md)
+- [Contributing](contributing.md)
+- [Feature support](feature-support.md)
+- Releases
+  - [Verify a release](verify-release.md)
+  - [How to release](how-to-release.md)
+- [Code Reference](reference/)
+
+
diff --git a/mkdocs/docs/api.md b/mkdocs/docs/api.md
new file mode 100644
index 0000000000..55eadc5f5b
--- /dev/null
+++ b/mkdocs/docs/api.md
@@ -0,0 +1,424 @@
+---
+hide:
+  - navigation
+---
+
+
+
+# Python API
+
+PyIceberg is based around catalogs to load tables. The first step is to instantiate a catalog that loads tables. Let's use the following configuration to define a catalog called `prod`:
+
+```yaml
+catalog:
+  prod:
+    uri: http://rest-catalog/ws/
+    credential: t-1234:secret
+```
+
+This information must be placed inside a file called `.pyiceberg.yaml` located either in the `$HOME` or `%USERPROFILE%` directory (depending on whether the operating system is Unix-based or Windows-based, respectively) or in the `$PYICEBERG_HOME` directory (if the corresponding environment variable is set).
+
+For more details on possible configurations refer to the [configuration page](https://py.iceberg.apache.org/configuration/).
+
+Then load the `prod` catalog:
+
+```python
+from pyiceberg.catalog import load_catalog
+
+catalog = load_catalog(
+    "docs",
+    **{
+        "uri": "http://127.0.0.1:8181",
+        "s3.endpoint": "http://127.0.0.1:9000",
+        "py-io-impl": "pyiceberg.io.pyarrow.PyArrowFileIO",
+        "s3.access-key-id": "admin",
+        "s3.secret-access-key": "password",
+    }
+)
+```
+
+Let's create a namespace:
+
+```python
+catalog.create_namespace("docs_example")
+```
+
+And then list the namespaces:
+
+```python
+ns = catalog.list_namespaces()
+
+assert ns == [("docs_example",)]
+```
+
+And then list tables in the namespace:
+
+```python
+catalog.list_tables("docs_example")
+```
+
+## Create a table
+
+To create a table from a catalog:
+
+```python
+from pyiceberg.schema import Schema
+from pyiceberg.types import (
+    TimestampType,
+    FloatType,
+    DoubleType,
+    StringType,
+    NestedField,
+    StructType,
+)
+
+schema = Schema(
+    NestedField(field_id=1, name="datetime", field_type=TimestampType(), required=True),
+    NestedField(field_id=2, name="symbol", field_type=StringType(), required=True),
+    NestedField(field_id=3, name="bid", field_type=FloatType(), required=False),
+    NestedField(field_id=4, name="ask", field_type=DoubleType(), required=False),
+    NestedField(
+        field_id=5,
+        name="details",
+        field_type=StructType(
+            NestedField(
+                field_id=4, name="created_by", field_type=StringType(), required=False
+            ),
+        ),
+        required=False,
+    ),
+)
+
+from pyiceberg.partitioning import PartitionSpec, PartitionField
+from pyiceberg.transforms import DayTransform
+
+partition_spec = PartitionSpec(
+    PartitionField(
+        source_id=1, field_id=1000, transform=DayTransform(), name="datetime_day"
+    )
+)
+
+from pyiceberg.table.sorting import SortOrder, SortField
+from pyiceberg.transforms import IdentityTransform
+
+# Sort on the symbol
+sort_order = SortOrder(SortField(source_id=2, transform=IdentityTransform()))
+
+catalog.create_table(
+    identifier="docs_example.bids",
+    schema=schema,
+    partition_spec=partition_spec,
+    sort_order=sort_order,
+)
+```
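+
+The `create_table` call also returns the new `Table`. A minimal sketch of inspecting the result (loading tables is covered in more detail below):
+
+```python
+table = catalog.load_table("docs_example.bids")
+
+print(table.schema())      # the schema, with field IDs assigned by the catalog
+print(table.spec())        # the partition spec (day transform on `datetime`)
+print(table.sort_order())  # the sort order (identity on `symbol`)
+```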
+
+## Load a table
+
+### Catalog table
+
+Loading the `bids` table:
+
+```python
+table = catalog.load_table("docs_example.bids")
+# Equivalent to:
+table = catalog.load_table(("docs_example", "bids"))
+# The tuple syntax can be used if the namespace or table contains a dot.
+```
+
+This returns a `Table` that represents an Iceberg table that can be queried and altered.
+
+### Static table
+
+To load a table directly from a metadata file (i.e., **without** using a catalog), you can use a `StaticTable` as follows:
+
+```python
+from pyiceberg.table import StaticTable
+
+static_table = StaticTable.from_metadata(
+    "s3://warehouse/wh/nyc.db/taxis/metadata/00002-6ea51ce3-62aa-4197-9cf8-43d07c3440ca.metadata.json"
+)
+```
+
+The static table is considered read-only.
+
+## Schema evolution
+
+PyIceberg supports full schema evolution through the Python API. It takes care of setting the field IDs and makes sure that only non-breaking changes are made (this can be overridden).
+
+In the examples below, `.update_schema()` is called on the table itself.
+
+```python
+with table.update_schema() as update:
+    update.add_column("some_field", IntegerType(), "doc")
+```
+
+You can also initiate a transaction if you want to make more changes than just evolving the schema:
+
+```python
+with table.transaction() as transaction:
+    with transaction.update_schema() as update_schema:
+        update_schema.add_column("some_other_field", IntegerType(), "doc")
+    # ... Update properties etc
+```
+
+### Add column
+
+Using `add_column` you can add a column, without having to worry about the field ID:
+
+```python
+with table.update_schema() as update:
+    update.add_column("retries", IntegerType(), "Number of retries to place the bid")
+    # In a struct
+    update.add_column("details.confirmed_by", StringType(), "Name of the exchange")
+```
+
+### Rename column
+
+Renaming a field in an Iceberg table is simple:
+
+```python
+with table.update_schema() as update:
+    update.rename("retries", "num_retries")
+    # This will rename `confirmed_by` to `exchange`
+    update.rename("details.confirmed_by", "exchange")
+```
+
+### Move column
+
+Move fields inside a struct:
+
+```python
+with table.update_schema() as update:
+    update.move_first("symbol")
+    update.move_after("bid", "ask")
+    # This will move `created_by` before `exchange`
+    update.move_before("details.created_by", "details.exchange")
+```
+
+### Update column
+
+Update a field's type, description, or whether it is required.
+
+```python
+with table.update_schema() as update:
+    # Promote a float to a double
+    update.update_column("bid", field_type=DoubleType())
+    # Make a field optional
+    update.update_column("symbol", required=False)
+    # Update the documentation
+    update.update_column("symbol", doc="Name of the share on the exchange")
+```
+
+Be careful: some operations are not compatible, but can still be done at your own risk by setting `allow_incompatible_changes`:
+
+```python
+with table.update_schema(allow_incompatible_changes=True) as update:
+    # Incompatible change, cannot require an optional field
+    update.update_column("symbol", required=True)
+```
+
+### Delete column
+
+Delete a field. Be careful: this is an incompatible change (readers/writers might expect this field):
+
+```python
+with table.update_schema(allow_incompatible_changes=True) as update:
+    update.delete_column("some_field")
+```
+
+## Table properties
+
+Set and remove properties through the `Transaction` API:
+
+```python
+with table.transaction() as transaction:
+    transaction.set_properties(abc="def")
+
+assert table.properties == {"abc": "def"}
+
+with table.transaction() as transaction:
+    transaction.remove_properties("abc")
+
+assert table.properties == {}
+```
+
+Or, without a context manager:
+
+```python
+table = table.transaction().set_properties(abc="def").commit_transaction()
+
+assert table.properties == {"abc": "def"}
+
+table = table.transaction().remove_properties("abc").commit_transaction()
+
+assert table.properties == {}
+```
+
+## Query the data
+
+To query a table, a table scan is needed. A table scan accepts a filter, columns, and optionally a limit and a snapshot ID:
+
+```python
+from pyiceberg.catalog import load_catalog
+from pyiceberg.expressions import GreaterThanOrEqual
+
+catalog = load_catalog("default")
+table = catalog.load_table("nyc.taxis")
+
+scan = table.scan(
+    row_filter=GreaterThanOrEqual("trip_distance", 10.0),
+    selected_fields=("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime"),
+    limit=100,
+)
+
+# Or filter using a string predicate
+scan = table.scan(
+    row_filter="trip_distance > 10.0",
+)
+
+[task.file.file_path for task in scan.plan_files()]
+```
+
+The low-level `plan_files` method returns a set of tasks that provide the files that might contain matching rows:
+
+```json
+[
+  "s3://warehouse/wh/nyc/taxis/data/00003-4-42464649-92dd-41ad-b83b-dea1a2fe4b58-00001.parquet"
+]
+```
+
+In this case it is up to the engine to filter the rows within the files. The `to_arrow()` and `to_duckdb()` methods below already do this for you.
+
+### Apache Arrow
+
+
+
+!!! note "Requirements"
+    This requires [PyArrow to be installed](index.md).
+
+
+
+Using PyIceberg you can filter out data from a huge table and pull it into a PyArrow table:
+
+```python
+table.scan(
+    row_filter=GreaterThanOrEqual("trip_distance", 10.0),
+    selected_fields=("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime"),
+).to_arrow()
+```
+
+This will return a PyArrow table:
+
+```
+pyarrow.Table
+VendorID: int64
+tpep_pickup_datetime: timestamp[us, tz=+00:00]
+tpep_dropoff_datetime: timestamp[us, tz=+00:00]
+----
+VendorID: [[2,1,2,1,1,...,2,2,2,2,2],[2,1,1,1,2,...,1,1,2,1,2],...,[2,2,2,2,2,...,2,6,6,2,2],[2,2,2,2,2,...,2,2,2,2,2]]
+tpep_pickup_datetime: [[2021-04-01 00:28:05.000000,...,2021-04-30 23:44:25.000000]]
+tpep_dropoff_datetime: [[2021-04-01 00:47:59.000000,...,2021-05-01 00:14:47.000000]]
+```
+
+This will only pull in the files that might contain matching rows.
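+
+Because the result is a regular PyArrow table, it can be converted further with PyArrow's own API, for example into a pandas DataFrame. A minimal sketch, assuming pandas is installed:
+
+```python
+df = table.scan(
+    row_filter=GreaterThanOrEqual("trip_distance", 10.0),
+    selected_fields=("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime"),
+).to_arrow().to_pandas()  # pyarrow.Table.to_pandas() does the conversion
+```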
note "Requirements" + This requires [DuckDB to be installed](index.md). + + + +A table scan can also be converted into a in-memory DuckDB table: + +```python +con = table.scan( + row_filter=GreaterThanOrEqual("trip_distance", 10.0), + selected_fields=("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime"), +).to_duckdb(table_name="distant_taxi_trips") +``` + +Using the cursor that we can run queries on the DuckDB table: + +```python +print( + con.execute( + "SELECT tpep_dropoff_datetime - tpep_pickup_datetime AS duration FROM distant_taxi_trips LIMIT 4" + ).fetchall() +) +[ + (datetime.timedelta(seconds=1194),), + (datetime.timedelta(seconds=1118),), + (datetime.timedelta(seconds=1697),), + (datetime.timedelta(seconds=1581),), +] +``` + +### Ray + + + +!!! note "Requirements" + This requires [Ray to be installed](index.md). + + + +A table scan can also be converted into a Ray dataset: + +```python +ray_dataset = table.scan( + row_filter=GreaterThanOrEqual("trip_distance", 10.0), + selected_fields=("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime"), +).to_ray() +``` + +This will return a Ray dataset: + +``` +Dataset( + num_blocks=1, + num_rows=1168798, + schema={ + VendorID: int64, + tpep_pickup_datetime: timestamp[us, tz=UTC], + tpep_dropoff_datetime: timestamp[us, tz=UTC] + } +) +``` + +Using [Ray Dataset API](https://docs.ray.io/en/latest/data/api/dataset.html) to interact with the dataset: + +```python +print(ray_dataset.take(2)) +[ + { + "VendorID": 2, + "tpep_pickup_datetime": datetime.datetime(2008, 12, 31, 23, 23, 50), + "tpep_dropoff_datetime": datetime.datetime(2009, 1, 1, 0, 34, 31), + }, + { + "VendorID": 2, + "tpep_pickup_datetime": datetime.datetime(2008, 12, 31, 23, 5, 3), + "tpep_dropoff_datetime": datetime.datetime(2009, 1, 1, 16, 10, 18), + }, +] +``` diff --git a/mkdocs/docs/assets/images/iceberg-logo-icon.png b/mkdocs/docs/assets/images/iceberg-logo-icon.png new file mode 100644 index 0000000000000000000000000000000000000000..e4a99c3951e6aa99d6e44575f236f32aeb5710eb GIT binary patch literal 17608 zcmbXKcR1Dm`v8ugWAA+sSqbGFl*q`IGD68X_90}?j0h*QC`5?t%yW+8WN!!ADI+Un zWJN-e6~2%2dVj9#`}^nj&)0Qzb)Cn!_kBOseLtVyHMq@4$3q8!Kp1tjZyG@$WJuCK z8Yhxv?{CDn=7V^=3FQ-R=cD{}fbq6ndM?oDAyN8ZOj&=@@efk`gAds7`Iycpg zgTDTpolbl`k+HR7vRa(aTpw}_`CIQT$}8D@{hotMK}N4*wUiS+;tc)jZJXdq8t+?P zPY<^>&4N8@SJ$EN%J&n2$Y^i?q-omv5ZU$ex_V$syJ2Y zqPw+$iqQ!fz@S8bpK*`C=8yE4C8zw8sdI27O>47@hEgCP2~n3HH&3z_qv}lAY85rj z;vJzRl4$S8rd-(*x9i^+8hb%zmLfuG-Hk2Vxp+tCS+KRTQcJA2_PlFA6rNL$jD-N% z<($`*TIz0Z#N?N|suHb3UjPBlc7`0yZ5^bJ<-V9!zS?yK4M0 zFbYEjk090*^iao*J_Fd+t|4W=bV{h*5{DSNVggLuGK+vphynQ+{?IqN5hmF%Xk+qU zw@@PI1vMg>Vx&nUWzg#}2-O8Bd{!gtfdXjkpf{=9+^;RLpnRidLQjDZB%sE$HO4rw zP=|uDwJ|a%{q}XFPTo<2eKT27dKyLf+K$R}Ea1vBR=gGpy`g_QG37-tiE}to=S}eK ztM*WW^)*@x{WLNP1o0bc?C!1^6i+2_R1J_J7)*`cyApWNqZSzJ;S0`B(UdGW2un5K zkH!=-BUr-d=zUp?S)E7*e$rR~EEI5vr}O1nrvIO&S_*hZ?J0jdof?<=Mi57}K@|rW zXi1H2bTz{jBN-ve&f-*Q5ok&!R@?~Ok?hHLNrM4D@f8WN_SpB z&chB9O&>)T^bL6rfD^}LYE+Y~g1u}?j*k3<`CGeSo<;fiHZ10`Gu9V*B_? 
diff --git a/mkdocs/docs/cli.md b/mkdocs/docs/cli.md
new file mode 100644
index 0000000000..695011a6ef
--- /dev/null
+++ b/mkdocs/docs/cli.md
@@ -0,0 +1,220 @@
+---
+hide:
+  - navigation
+---
+
+
+
+# Python CLI
+
+PyIceberg comes with a CLI that's available after installing the `pyiceberg` package.
+
+You can pass the URI and credential of the catalog using the `--uri` and `--credential` arguments, but it is recommended to set up a `~/.pyiceberg.yaml` config as described in the [Catalog](configuration.md) section.
+
+```sh
+➜ pyiceberg --help
+Usage: pyiceberg [OPTIONS] COMMAND [ARGS]...
+
+Options:
+  --catalog TEXT
+  --verbose BOOLEAN
+  --output [text|json]
+  --uri TEXT
+  --credential TEXT
+  --help                Show this message and exit.
+
+Commands:
+  describe    Describes a namespace xor table
+  drop        Operations to drop a namespace or table
+  list        Lists tables or namespaces
+  location    Returns the location of the table
+  properties  Properties on tables/namespaces
+  rename      Renames a table
+  schema      Gets the schema of the table
+  spec        Returns the partition spec of the table
+  uuid        Returns the UUID of the table
+```
+
+This example assumes that you have a default catalog set. If you want to use another catalog, for example the `rest` catalog from the configuration above, you need to pass `--catalog rest`, as shown in the example below.
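+
+A sketch of that invocation, assuming a catalog named `rest` is defined in your `~/.pyiceberg.yaml`:
+
+```sh
+➜ pyiceberg --catalog rest list
+```
+
+The remaining examples use the default catalog: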
+ +```sh +➜ pyiceberg list +default +nyc +``` + +```sh +➜ pyiceberg list nyc +nyc.taxis +``` + +```sh +➜ pyiceberg describe nyc.taxis +Table format version 1 +Metadata location file:/.../nyc.db/taxis/metadata/00000-aa3a3eac-ea08-4255-b890-383a64a94e42.metadata.json +Table UUID 6cdfda33-bfa3-48a7-a09e-7abb462e3460 +Last Updated 1661783158061 +Partition spec [] +Sort order [] +Current schema Schema, id=0 +├── 1: VendorID: optional long +├── 2: tpep_pickup_datetime: optional timestamptz +├── 3: tpep_dropoff_datetime: optional timestamptz +├── 4: passenger_count: optional double +├── 5: trip_distance: optional double +├── 6: RatecodeID: optional double +├── 7: store_and_fwd_flag: optional string +├── 8: PULocationID: optional long +├── 9: DOLocationID: optional long +├── 10: payment_type: optional long +├── 11: fare_amount: optional double +├── 12: extra: optional double +├── 13: mta_tax: optional double +├── 14: tip_amount: optional double +├── 15: tolls_amount: optional double +├── 16: improvement_surcharge: optional double +├── 17: total_amount: optional double +├── 18: congestion_surcharge: optional double +└── 19: airport_fee: optional double +Current snapshot Operation.APPEND: id=5937117119577207079, schema_id=0 +Snapshots Snapshots +└── Snapshot 5937117119577207079, schema 0: file:/.../nyc.db/taxis/metadata/snap-5937117119577207079-1-94656c4f-4c66-4600-a4ca-f30377300527.avro +Properties owner root +write.format.default parquet +``` + +Or output in JSON for automation: + +```sh +➜ pyiceberg --output json describe nyc.taxis | jq +{ + "identifier": [ + "nyc", + "taxis" + ], + "metadata_location": "file:/.../nyc.db/taxis/metadata/00000-aa3a3eac-ea08-4255-b890-383a64a94e42.metadata.json", + "metadata": { + "location": "file:/.../nyc.db/taxis", + "table-uuid": "6cdfda33-bfa3-48a7-a09e-7abb462e3460", + "last-updated-ms": 1661783158061, + "last-column-id": 19, + "schemas": [ + { + "type": "struct", + "fields": [ + { + "id": 1, + "name": "VendorID", + "type": "long", + "required": false + }, +... + { + "id": 19, + "name": "airport_fee", + "type": "double", + "required": false + } + ], + "schema-id": 0, + "identifier-field-ids": [] + } + ], + "current-schema-id": 0, + "partition-specs": [ + { + "spec-id": 0, + "fields": [] + } + ], + "default-spec-id": 0, + "last-partition-id": 999, + "properties": { + "owner": "root", + "write.format.default": "parquet" + }, + "current-snapshot-id": 5937117119577207000, + "snapshots": [ + { + "snapshot-id": 5937117119577207000, + "timestamp-ms": 1661783158061, + "manifest-list": "file:/.../nyc.db/taxis/metadata/snap-5937117119577207079-1-94656c4f-4c66-4600-a4ca-f30377300527.avro", + "summary": { + "operation": "append", + "spark.app.id": "local-1661783139151", + "added-data-files": "1", + "added-records": "2979431", + "added-files-size": "46600777", + "changed-partition-count": "1", + "total-records": "2979431", + "total-files-size": "46600777", + "total-data-files": "1", + "total-delete-files": "0", + "total-position-deletes": "0", + "total-equality-deletes": "0" + }, + "schema-id": 0 + } + ], + "snapshot-log": [ + { + "snapshot-id": "5937117119577207079", + "timestamp-ms": 1661783158061 + } + ], + "metadata-log": [], + "sort-orders": [ + { + "order-id": 0, + "fields": [] + } + ], + "default-sort-order-id": 0, + "refs": { + "main": { + "snapshot-id": 5937117119577207000, + "type": "branch" + } + }, + "format-version": 1, + "schema": { + "type": "struct", + "fields": [ + { + "id": 1, + "name": "VendorID", + "type": "long", + "required": false + }, +... 
+        {
+          "id": 19,
+          "name": "airport_fee",
+          "type": "double",
+          "required": false
+        }
+      ],
+      "schema-id": 0,
+      "identifier-field-ids": []
+    },
+    "partition-spec": []
+  }
+}
+```
diff --git a/mkdocs/docs/configuration.md b/mkdocs/docs/configuration.md
new file mode 100644
index 0000000000..a56baff7b5
--- /dev/null
+++ b/mkdocs/docs/configuration.md
@@ -0,0 +1,200 @@
+---
+hide:
+  - navigation
+---
+
+
+
+# Catalogs
+
+PyIceberg currently has native support for REST, SQL, Hive, Glue, and DynamoDB catalogs.
+
+There are three ways to pass in configuration:
+
+- Using the `~/.pyiceberg.yaml` configuration file
+- Through environment variables
+- By passing in credentials through the CLI or the Python API
+
+The configuration file is recommended since that's the most transparent way. If you prefer environment configuration:
+
+```sh
+export PYICEBERG_CATALOG__DEFAULT__URI=thrift://localhost:9083
+```
+
+The environment variables picked up by PyIceberg start with `PYICEBERG_` and then follow the yaml structure below, where a double underscore `__` represents a nested field.
+
+## FileIO
+
+PyIceberg works with the concept of a FileIO, which is a pluggable module for reading, writing, and deleting files. By default, PyIceberg will try to initialize the FileIO that's suitable for the scheme (`s3://`, `gs://`, etc.) and will use the first one that's installed.
+
+- **s3**, **s3a**, **s3n**: `PyArrowFileIO`, `FsspecFileIO`
+- **gs**: `PyArrowFileIO`
+- **file**: `PyArrowFileIO`
+- **hdfs**: `PyArrowFileIO`
+- **abfs**, **abfss**: `FsspecFileIO`
+
+You can also set the FileIO explicitly:
+
+| Key        | Example                          | Description                                                                                      |
+| ---------- | -------------------------------- | ------------------------------------------------------------------------------------------------ |
+| py-io-impl | pyiceberg.io.fsspec.FsspecFileIO | Sets the FileIO explicitly to an implementation, and will fail explicitly if it can't be loaded  |
+
+For the FileIO there are several configuration options available:
+
+### S3
+
+| Key                  | Example                  | Description |
+| -------------------- | ------------------------ | ----------- |
+| s3.endpoint          | https://10.0.19.25/      | Configure an alternative endpoint of the S3 service for the FileIO to access. This could be used to use S3FileIO with any s3-compatible object storage service that has a different endpoint, or access a private S3 endpoint in a virtual private cloud. |
+| s3.access-key-id     | admin                    | Configure the static access key ID used to access the FileIO. |
+| s3.secret-access-key | password                 | Configure the static secret access key used to access the FileIO. |
+| s3.signer            | bearer                   | Configure the signature version of the FileIO. |
+| s3.region            | us-west-2                | Sets the region of the bucket |
+| s3.proxy-uri         | http://my.proxy.com:8080 | Configure the proxy server to be used by the FileIO. |
+
+### HDFS
+
+| Key                  | Example             | Description                                      |
+| -------------------- | ------------------- | ------------------------------------------------ |
+| hdfs.host            | https://10.0.19.25/ | Configure the HDFS host to connect to            |
+| hdfs.port            | 9000                | Configure the HDFS port to connect to.           |
+| hdfs.user            | user                | Configure the HDFS username used for connection. |
+| hdfs.kerberos_ticket | kerberos_ticket     | Configure the path to the Kerberos ticket cache. |
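+
+These FileIO properties sit next to the rest of the catalog configuration and can also be passed programmatically. A minimal sketch, assuming a REST catalog behind a local MinIO endpoint (the URI and credentials are illustrative):
+
+```python
+from pyiceberg.catalog import load_catalog
+
+catalog = load_catalog(
+    "default",
+    **{
+        "uri": "http://rest-catalog/ws/",
+        # Force the fsspec implementation instead of the default PyArrow one
+        "py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO",
+        "s3.endpoint": "http://minio:9000",
+        "s3.access-key-id": "admin",
+        "s3.secret-access-key": "password",
+    },
+)
+```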
+
+### Azure Data Lake
+
+| Key                     | Example                                                                                    | Description |
+| ----------------------- | ------------------------------------------------------------------------------------------ | ----------- |
+| adlfs.connection-string | AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqF...;BlobEndpoint=http://localhost/  | A [connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string). This could be used to use FileIO with any adlfs-compatible object storage service that has a different endpoint (like [azurite](https://github.com/azure/azurite)). |
+| adlfs.account-name      | devstoreaccount1                                                                           | The account that you want to connect to |
+| adlfs.account-key       | Eby8vdM02xNOcqF...                                                                         | The key to authenticate against the account. |
+| adlfs.sas-token         | NuHOuuzdQN7VRM%2FOpOeqBlawRCA845IY05h9eu1Yte4%3D                                           | The shared access signature |
+| adlfs.tenant-id         | ad667be4-b811-11ed-afa1-0242ac120002                                                       | The tenant ID |
+| adlfs.client-id         | ad667be4-b811-11ed-afa1-0242ac120002                                                       | The client ID |
+| adlfs.client-secret     | oCA3R6P\*ka#oa1Sms2J74z...                                                                 | The client secret |
+
+### Google Cloud Storage
+
+| Key                        | Example             | Description |
+| -------------------------- | ------------------- | ----------- |
+| gcs.project-id             | my-gcp-project      | Configure the Google Cloud project for the GCS FileIO. |
+| gcs.oauth.token            | ya29.dr.AfM...      | Configure the method of authentication to GCS for the FileIO. Can be one of 'google_default', 'cache', 'anon', 'browser', 'cloud'. If not specified, your credentials will be resolved in the following order: gcloud CLI default, gcsfs cached token, Google compute metadata service, anonymous. |
+| gcs.oauth.token-expires-at | 1690971805918       | Configure the expiration for credentials generated with an access token, in milliseconds since epoch. |
+| gcs.access                 | read_only           | Configure the client to have specific access. Must be one of 'read_only', 'read_write', or 'full_control'. |
+| gcs.consistency            | md5                 | Configure the check method when writing files. Must be one of 'none', 'size', or 'md5'. |
+| gcs.cache-timeout          | 60                  | Configure the cache expiration time in seconds for object metadata. |
+| gcs.requester-pays         | False               | Configure whether to use requester-pays requests. |
+| gcs.session-kwargs         | {}                  | Configure a dict of parameters to pass on to aiohttp.ClientSession; can contain, for example, proxy settings. |
+| gcs.endpoint               | http://0.0.0.0:4443 | Configure an alternative endpoint for the GCS FileIO to access (format: protocol://host:port). If not given, defaults to the value of the environment variable "STORAGE_EMULATOR_HOST"; if that is not set either, the standard Google endpoint is used. |
+| gcs.default-location       | US                  | Configure the default location where buckets are created, like 'US' or 'EUROPE-WEST3'. |
+| gcs.version-aware          | False               | Configure whether to support object versioning on the GCS bucket. |
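+
+Like the S3 properties, these keys live next to the catalog entry in `~/.pyiceberg.yaml`. A sketch that points the GCS FileIO at a local fake-gcs-server, matching the emulator from `dev/docker-compose-gcs-server.yml`:
+
+```yaml
+catalog:
+  default:
+    uri: http://rest-catalog/ws/
+    gcs.project-id: my-gcp-project
+    gcs.endpoint: http://0.0.0.0:4443
+```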
+## REST Catalog
+
+```yaml
+catalog:
+  default:
+    uri: http://rest-catalog/ws/
+    credential: t-1234:secret
+
+  default-mtls-secured-catalog:
+    uri: https://rest-catalog/ws/
+    ssl:
+      client:
+        cert: /absolute/path/to/client.crt
+        key: /absolute/path/to/client.key
+      cabundle: /absolute/path/to/cabundle.pem
+```
+
+| Key                 | Example                 | Description |
+| ------------------- | ----------------------- | ----------- |
+| uri                 | https://rest-catalog/ws | URI identifying the REST server |
+| credential          | t-1234:secret           | Credential to use for the OAuth2 credential flow when initializing the catalog |
+| token               | FEW23.DFSDF.FSDF        | Bearer token value to use for the `Authorization` header |
+| rest.sigv4-enabled  | true                    | Sign requests to the REST server using the AWS SigV4 protocol |
+| rest.signing-region | us-east-1               | The region to use when SigV4-signing a request |
+| rest.signing-name   | execute-api             | The service signing name to use when SigV4-signing a request |
+
+## SQL Catalog
+
+The SQL catalog requires a database for its backend. As of now, PyIceberg only supports PostgreSQL through psycopg2.
+The database connection has to be configured using the `uri` property (see SQLAlchemy's [documentation for the URL format](https://docs.sqlalchemy.org/en/20/core/engines.html#backend-specific-urls)):
+
+```yaml
+catalog:
+  default:
+    type: sql
+    uri: postgresql+psycopg2://username:password@localhost/mydatabase
+```
+
+## Hive Catalog
+
+```yaml
+catalog:
+  default:
+    uri: thrift://localhost:9083
+    s3.endpoint: http://localhost:9000
+    s3.access-key-id: admin
+    s3.secret-access-key: password
+```
+
+## Glue Catalog
+
+Your AWS credentials can be passed directly through the Python API.
+Otherwise, please refer to
+[How to configure AWS credentials](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html) to set your AWS account credentials locally.
+If you did not set up a default AWS profile, you can configure the `profile_name`:
+
+```yaml
+catalog:
+  default:
+    type: glue
+    aws_access_key_id:
+    aws_secret_access_key:
+    aws_session_token:
+    region_name:
+```
+
+```yaml
+catalog:
+  default:
+    type: glue
+    profile_name:
+    region_name:
+```
+
+## DynamoDB Catalog
+
+If you want to use AWS DynamoDB as the catalog, you can use the last two configuration methods above to configure PyIceberg, and refer to
+[How to configure AWS credentials](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html)
+to set your AWS account credentials locally.
+
+```yaml
+catalog:
+  default:
+    type: dynamodb
+    table-name: iceberg
+```
+
+# Concurrency
+
+PyIceberg uses multiple threads to parallelize operations. The number of workers can be configured by supplying a `max-workers` entry in the configuration file, or by setting the `PYICEBERG_MAX_WORKERS` environment variable. The default value depends on the system hardware and Python version. See [the Python documentation](https://docs.python.org/3/library/concurrent.futures.html#threadpoolexecutor) for more details.
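+For example, to cap the pool at eight workers you can either add `max-workers: 8` to `~/.pyiceberg.yaml` or set the environment variable; a tiny sketch of the latter from Python (the value `8` is arbitrary):
+
+```python
+import os
+
+# Should be set before PyIceberg is used, so it is picked up
+# when the thread pool is created.
+os.environ["PYICEBERG_MAX_WORKERS"] = "8"
+```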
diff --git a/mkdocs/docs/contributing.md b/mkdocs/docs/contributing.md
new file mode 100644
index 0000000000..87a8cc701b
--- /dev/null
+++ b/mkdocs/docs/contributing.md
@@ -0,0 +1,163 @@
+---
+hide:
+  - navigation
+---
+
+# Contributing to the Iceberg Python library
+
+For development, Poetry is used for packaging and dependency management. You can install it using:
+
+```bash
+pip install poetry
+```
+
+If you have an older version of pip and virtualenv, you need to update these first:
+
+```bash
+pip install --upgrade virtualenv pip
+```
+
+To get started, you can run `make install`, which installs Poetry and all the dependencies of the Iceberg library. This also installs the development dependencies. If you don't want the development dependencies, you can install using `poetry install --no-dev`.
+
+If you want to install the library on the host, you can simply run `pip3 install -e .`. If you wish to use a virtual environment, you can run `poetry shell`; Poetry will open up a virtual environment with all the dependencies set.
+
+To set up IDEA with Poetry ([also on Loom](https://www.loom.com/share/6d36464d45f244729d91003e7f671fd2)):
+
+- Open up the Python project in IntelliJ
+- Make sure that you're on the latest master (which includes Poetry)
+- Go to File -> Project Structure (⌘;)
+- Go to Platform Settings -> SDKs
+- Click the + sign -> Add Python SDK
+- Select Poetry Environment from the left-hand sidebar and hit OK
+- It can take some time to download all the dependencies, depending on your internet connection
+- Go to Project Settings -> Project
+- Select the Poetry SDK from the SDK dropdown, and click OK
+
+For IDEA ≤2021 you need to install the [Poetry integration as a plugin](https://plugins.jetbrains.com/plugin/14307-poetry/).
+
+Now you're all set up with Poetry: the tests will run in the Poetry environment, and you'll get syntax highlighting in pyproject.toml that flags stale dependencies.
+
+## Linting
+
+`pre-commit` is used for autoformatting and linting:
+
+```bash
+make lint
+```
+
+Pre-commit will automatically fix violations such as import order and formatting; Pylint errors you need to fix yourself.
+
+In contrast to what the name suggests, it doesn't run the checks on every commit. If you would like that, you can set it up by running `pre-commit install`.
+
+You can bump the integrations to the latest version using `pre-commit autoupdate`. This will check if there is a newer version of `{black,mypy,isort,...}` and update the yaml.
+
+## Testing
+
+For Python, `pytest` is used as the testing framework, in combination with `coverage` to enforce 90%+ code coverage.
+
+```bash
+make test
+```
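+To give a flavor of what the suite looks like, here is a minimal sketch of a unit test in the pytest style used throughout `tests/` (the field names are made up):
+
+```python
+from pyiceberg.schema import Schema
+from pyiceberg.types import IntegerType, NestedField, StringType
+
+
+def test_schema_find_field() -> None:
+    # Build a two-column schema and look a field up by name.
+    schema = Schema(
+        NestedField(field_id=1, name="id", field_type=IntegerType(), required=True),
+        NestedField(field_id=2, name="name", field_type=StringType(), required=False),
+    )
+    assert schema.find_field("id").field_id == 1
+```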
+By default, the S3 and ADLFS tests are ignored because they require Minio and Azurite to be running.
+To run the S3 suite:
+
+```bash
+make test-s3
+```
+
+To run the ADLFS suite:
+
+```bash
+make test-adlfs
+```
+
+To pass additional arguments to pytest, you can use `PYTEST_ARGS`.
+
+_Run pytest in verbose mode_
+
+```sh
+make test PYTEST_ARGS="-v"
+```
+
+_Run pytest with pdb enabled_
+
+```sh
+make test PYTEST_ARGS="--pdb"
+```
+
+To see all available pytest arguments, run `make test PYTEST_ARGS="--help"`.
+
+### Integration tests
+
+PyIceberg has integration tests with Apache Spark. Spark will create a new database and provision some tables that PyIceberg can query against.
+
+```sh
+make test-integration
+```
+
+This will restart the containers to get to a clean state, and then run the pytest suite. In case something changed in the Dockerfile or the provision script, you can rebuild the containers from scratch by running:
+
+```sh
+make test-integration-rebuild
+```
+
+## Code standards
+
+Below are the formalized conventions that we adhere to in the PyIceberg project. The goal of these is to have a common agreement on how to evolve the codebase, but they also serve as guidelines for newcomers to the project.
+
+## API Compatibility
+
+It is important to keep the Python public API compatible across versions. The official Python style guide, [PEP 8](https://peps.python.org/pep-0008/), defines public attributes as those without leading underscores. Keeping the API compatible means not removing any public methods without notice, and not removing or renaming any existing parameters. Adding new optional parameters is okay.
+
+If you want to remove a method, please add a deprecation notice by annotating the function using `@deprecated`:
+
+```python
+from pyiceberg.utils.deprecated import deprecated
+
+
+@deprecated(
+    deprecated_in="0.1.0",
+    removed_in="0.2.0",
+    help_message="Please use load_something_else() instead",
+)
+def load_something():
+    pass
+```
+
+Which will warn:
+
+```
+Call to load_something, deprecated in 0.1.0, will be removed in 0.2.0. Please use load_something_else() instead.
+```
+
+## Type annotations
+
+For type annotations, the types from the `typing` package are used.
+
+Since PyIceberg supports Python 3.8 onwards, we can't use the [type hints from the standard collections](https://peps.python.org/pep-0585/).
+
+## Third party libraries
+
+PyIceberg naturally integrates into the rich Python ecosystem; however, it is important to be hesitant about adding third-party packages. Adding a lot of packages makes the library heavyweight and causes incompatibilities with other projects if they use a different version of the same library. Also, big libraries such as `s3fs`, `adlfs`, `pyarrow`, and `thrift` should be optional, to avoid downloading everything when it is not actually being used.
diff --git a/mkdocs/docs/feature-support.md b/mkdocs/docs/feature-support.md
new file mode 100644
index 0000000000..241b92256e
--- /dev/null
+++ b/mkdocs/docs/feature-support.md
@@ -0,0 +1,72 @@
+---
+hide:
+  - navigation
+---
+
+# Feature Support
+
+The goal is for the Python library to provide a functional, performant subset of the Java library. The initial focus has been on reading table metadata and providing a convenient CLI to go through the catalog.
+
+## Types
+
+The types are kept in `pyiceberg.types`.
+
+Primitive types:
+
+- `BooleanType`
+- `StringType`
+- `IntegerType`
+- `LongType`
+- `FloatType`
+- `DoubleType`
+- `DateType`
+- `TimeType`
+- `TimestampType`
+- `TimestamptzType`
+- `BinaryType`
+- `UUIDType`
+
+Complex types:
+
+- `StructType`
+- `ListType`
+- `MapType`
+- `FixedType(16)`
+- `DecimalType(8, 3)`
+
+## Expressions
+
+The expressions are kept in `pyiceberg.expressions`.
+
+- `IsNull`
+- `NotNull`
+- `IsNaN`
+- `NotNaN`
+- `In`
+- `NotIn`
+- `EqualTo`
+- `NotEqualTo`
+- `GreaterThanOrEqual`
+- `GreaterThan`
+- `LessThanOrEqual`
+- `LessThan`
+- `And`
+- `Or`
+- `Not`
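+These compose into row filters. A short sketch combining a few of the expressions above (the column names are made up):
+
+```python
+from pyiceberg.expressions import And, EqualTo, GreaterThanOrEqual
+
+# Rows where city == "Amsterdam" and fare_amount >= 10.0.
+row_filter = And(
+    EqualTo("city", "Amsterdam"),
+    GreaterThanOrEqual("fare_amount", 10.0),
+)
+```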
diff --git a/mkdocs/docs/how-to-release.md b/mkdocs/docs/how-to-release.md
new file mode 100644
index 0000000000..37af8bbbbd
--- /dev/null
+++ b/mkdocs/docs/how-to-release.md
@@ -0,0 +1,160 @@
+
+# How to release
+
+This guide describes how to release PyIceberg.
+
+The first step is to create a release candidate (RC) and publish it to the public for testing and validation. Once the vote on the RC has passed, the RC turns into the new release.
+
+## Running a release candidate
+
+Make sure that the version is correct in `pyproject.toml` and `pyiceberg/__init__.py`. Correct means that it reflects the version that you want to release.
+
+### Setting the tag
+
+First set the tag on the commit:
+
+```bash
+export RC=rc1
+export VERSION=0.1.0${RC}
+export VERSION_WITHOUT_RC=${VERSION/rc?/}
+export VERSION_BRANCH=${VERSION_WITHOUT_RC//./-}
+export GIT_TAG=pyiceberg-${VERSION}
+
+git tag -s ${GIT_TAG} -m "PyIceberg ${VERSION}"
+git push apache ${GIT_TAG}
+
+export GIT_TAG_REF=$(git show-ref ${GIT_TAG})
+export GIT_TAG_HASH=${GIT_TAG_REF:0:40}
+export LAST_COMMIT_ID=$(git rev-list ${GIT_TAG} 2> /dev/null | head -n 1)
+```
+
+The `-s` option will sign the tag. If you don't have a key yet, you can find the instructions [here](http://www.apache.org/dev/openpgp.html#key-gen-generate-key). To install gpg on an M1-based Mac, a couple of additional steps are required: https://gist.github.com/phortuin/cf24b1cca3258720c71ad42977e1ba57
+
+### Upload to Apache SVN
+
+Both the source distribution (`sdist`) and the binary distributions (`wheels`) need to be published for the RC. The wheels are convenient because they avoid requiring people to install compilers locally. The downside is that each architecture requires its own wheel; [`cibuildwheel`](https://github.com/pypa/cibuildwheel) runs in GitHub Actions to create a wheel for each of the architectures.
+
+Before committing the files to the Apache SVN artifact distribution repository, hashes need to be generated, and those need to be signed with gpg to make sure that they are authentic.
+
+Go to [GitHub Actions and run the `Python release` action](https://github.com/apache/iceberg/actions/workflows/python-release.yml). **Set the version to master, since we cannot modify the source**. Download the zip, and sign the files:
+
+```bash
+for name in $(ls release-master/pyiceberg-*.whl release-master/pyiceberg-*.tar.gz)
+do
+    gpg --yes --armor --local-user fokko@apache.org --output "${name}.asc" --detach-sig "${name}"
+    shasum -a 512 "${name}.asc" > "${name}.asc.sha512"
+done
+```
+
+Now we can upload the files:
+
+```bash
+export SVN_TMP_DIR=/tmp/iceberg-${VERSION_BRANCH}/
+svn checkout https://dist.apache.org/repos/dist/dev/iceberg $SVN_TMP_DIR
+
+export SVN_TMP_DIR_VERSIONED=${SVN_TMP_DIR}pyiceberg-$VERSION/
+mkdir -p $SVN_TMP_DIR_VERSIONED
+cp release-master/* $SVN_TMP_DIR_VERSIONED
+svn add $SVN_TMP_DIR_VERSIONED
+svn ci -m "PyIceberg ${VERSION}" ${SVN_TMP_DIR_VERSIONED}
+```
+
+### Upload to PyPI
+
+Go to GitHub Actions and run the `Python release` action again. This time, set the version of the release candidate as the input: `0.1.0rc1`. Download the zip and unzip it locally.
+
+The next step is to upload the files to PyPI. Please keep in mind that this **won't** bump the version for everyone that hasn't pinned their version, since an RC is a [pre-release version, and those are ignored](https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/#pre-release-versioning).
+
+```bash
+twine upload -s release-0.1.0rc1/*
+```
+
+The final step is to generate the email to the dev mailing list:
+
+```bash
+cat << EOF > release-announcement-email.txt
+To: dev@iceberg.apache.org
+Subject: [VOTE] Release Apache PyIceberg $VERSION_WITHOUT_RC
+Hi Everyone,
+
+I propose that we release the following RC as the official PyIceberg $VERSION_WITHOUT_RC release.
+
+The commit ID is $LAST_COMMIT_ID
+
+* This corresponds to the tag: $GIT_TAG ($GIT_TAG_HASH)
+* https://github.com/apache/iceberg/releases/tag/$GIT_TAG
+* https://github.com/apache/iceberg/tree/$LAST_COMMIT_ID
+
+The release tarball, signature, and checksums are here:
+
+* https://dist.apache.org/repos/dist/dev/iceberg/pyiceberg-$VERSION/
+
+You can find the KEYS file here:
+
+* https://dist.apache.org/repos/dist/dev/iceberg/KEYS
+
+Convenience binary artifacts are staged on pypi:
+
+https://pypi.org/project/pyiceberg/$VERSION/
+
+And can be installed using: pip3 install pyiceberg==$VERSION
+
+Please download, verify, and test.
+
+Please vote in the next 72 hours.
+[ ] +1 Release this as PyIceberg $VERSION_WITHOUT_RC
+[ ] +0
+[ ] -1 Do not release this because...
+EOF
+
+cat release-announcement-email.txt
+```
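+Before sending the email, it doesn't hurt to double-check that the staged artifacts carry the version you are about to announce. A hedged sketch (this assumes `pyiceberg/__init__.py` exposes a `__version__` attribute, as implied by the version-bump step above):
+
+```python
+import pyiceberg
+
+# Placeholder: substitute the $VERSION you exported earlier.
+expected = "0.1.0rc1"
+assert pyiceberg.__version__ == expected, f"unexpected version: {pyiceberg.__version__}"
+```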
+## Vote has passed
+
+Once the vote has passed, the latest version can be pushed to PyPI. Check out the Apache SVN and make sure to publish the right version with `twine`:
+
+```bash
+svn checkout https://dist.apache.org/repos/dist/dev/iceberg /tmp/iceberg
+twine upload -s /tmp/iceberg/pyiceberg-0.1.0rc1/*
+```
+
+Send out an announcement on the dev mailing list:
+
+```
+To: dev@iceberg.apache.org
+Subject: [ANNOUNCE] Apache PyIceberg release <version>
+
+I'm pleased to announce the release of Apache PyIceberg <version>!
+
+Apache Iceberg is an open table format for huge analytic datasets. Iceberg
+delivers high query performance for tables with tens of petabytes of data,
+along with atomic commits, concurrent writes, and SQL-compatible table
+evolution.
+
+This Python release can be downloaded from: https://pypi.org/project/pyiceberg/<version>/
+
+Thanks to everyone for contributing!
+```
+
+## Release the docs
+
+A committer triggers the [`Python Docs` GitHub Actions workflow](https://github.com/apache/iceberg/actions/workflows/python-ci-docs.yml) through the UI by selecting the branch that has just been released. This will publish the new docs.
diff --git a/mkdocs/docs/index.md b/mkdocs/docs/index.md
new file mode 100644
index 0000000000..23adde5d96
--- /dev/null
+++ b/mkdocs/docs/index.md
@@ -0,0 +1,73 @@
+---
+hide:
+  - navigation
+---
+
+# PyIceberg
+
+PyIceberg is a Python implementation for accessing Iceberg tables, without the need for a JVM.
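+To give a first impression of the Python API, here is a small sketch (the catalog and table names are placeholders; see the Install section below first):
+
+```python
+from pyiceberg.catalog import load_catalog
+
+# Assumes a catalog named "default" is configured in ~/.pyiceberg.yaml.
+catalog = load_catalog("default")
+table = catalog.load_table("nyc.taxis")
+print(table.schema())
+```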
+## Install
+
+Before installing PyIceberg, make sure that you're on an up-to-date version of `pip`:
+
+```sh
+pip install --upgrade pip
+```
+
+You can install the latest release version from PyPI:
+
+```sh
+pip install "pyiceberg[s3fs,hive]"
+```
+
+You can also install it directly from GitHub (not recommended, but sometimes handy):
+
+```sh
+pip install "git+https://github.com/apache/iceberg.git#subdirectory=python&egg=pyiceberg[s3fs]"
+```
+
+Or clone the repository for local development:
+
+```sh
+git clone https://github.com/apache/iceberg.git
+cd iceberg/python
+pip3 install -e ".[s3fs,hive]"
+```
+
+You can mix and match optional dependencies depending on your needs:
+
+| Key      | Description                                                           |
+| -------- | --------------------------------------------------------------------- |
+| hive     | Support for the Hive metastore                                        |
+| glue     | Support for AWS Glue                                                  |
+| dynamodb | Support for AWS DynamoDB                                              |
+| pyarrow  | PyArrow as a FileIO implementation to interact with the object store  |
+| pandas   | Installs both PyArrow and Pandas                                      |
+| duckdb   | Installs both PyArrow and DuckDB                                      |
+| ray      | Installs PyArrow, Pandas, and Ray                                     |
+| s3fs     | S3FS as a FileIO implementation to interact with the object store     |
+| adlfs    | ADLFS as a FileIO implementation to interact with the object store    |
+| snappy   | Support for snappy Avro compression                                   |
+| gcs      | GCS as the FileIO implementation to interact with the object store    |
+
+You need to install at least one of `s3fs`, `adlfs`, `gcs`, or `pyarrow` to be able to fetch files.
+
+There is both a [CLI](cli.md) and a [Python API](api.md) available.
diff --git a/mkdocs/docs/verify-release.md b/mkdocs/docs/verify-release.md
new file mode 100644
index 0000000000..5750d28c2d
--- /dev/null
+++ b/mkdocs/docs/verify-release.md
@@ -0,0 +1,110 @@
+
+# Verifying a release
+
+Each Apache PyIceberg release is validated by the community by holding a vote. A community release manager will prepare a release candidate and call a vote on the Iceberg dev list. To validate the release candidate, community members will test it out in their downstream projects and environments.
+
+In addition to testing in downstream projects, community members also check the release's signatures, checksums, and license documentation.
+
+## Validating a release candidate
+
+Release announcements include links to the following:
+
+- A source tarball
+- A signature (.asc)
+- A checksum (.sha512)
+- The KEYS file
+- A GitHub change comparison
+
+After downloading the source tarball, signature, checksum, and KEYS file, here are the instructions on how to verify the signatures, checksums, and documentation.
+
+## Verifying signatures
+
+First, import the keys.
+
+```sh
+curl https://dist.apache.org/repos/dist/dev/iceberg/KEYS -o KEYS
+gpg --import KEYS
+```
+
+Next, verify the `.asc` file.
+
+```sh
+gpg --verify pyiceberg-0.4.0-py3-none-any.whl.asc pyiceberg-0.4.0-py3-none-any.whl
+```
+
+## Verifying checksums
+
+```sh
+shasum -a 512 --check pyiceberg-0.4.0-py3-none-any.whl.sha512
+```
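+If `shasum` is unavailable, an equivalent check is easy to script. A sketch, assuming the `.sha512` file follows the `<digest>  <filename>` format that `shasum` emits:
+
+```python
+import hashlib
+from pathlib import Path
+
+artifact = Path("pyiceberg-0.4.0-py3-none-any.whl")
+# First whitespace-separated token of the .sha512 file is the digest.
+expected = Path(f"{artifact}.sha512").read_text().split()[0]
+actual = hashlib.sha512(artifact.read_bytes()).hexdigest()
+assert actual == expected, "checksum mismatch"
+```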
+## Verifying License Documentation
+
+```sh
+tar xzf pyiceberg-0.4.0.tar.gz
+cd pyiceberg-0.4.0
+```
+
+Run RAT checks to validate the license headers:
+
+```sh
+./dev/check-license
+```
+
+## Testing
+
+This section explains how to run the tests of the source distribution.
+
+!!! note "Clean environment"
+    To make sure that your environment is fresh, run the tests in a new Docker container:
+    `docker run -t -i -v $(pwd):/pyiceberg/ python:3.9 bash`, and change directory: `cd /pyiceberg/`.
+
+The first step is to install the package:
+
+```sh
+make install
+```
+
+And then run the tests:
+
+```sh
+make test
+```
+
+To run the full test suite, including the S3 tests:
+
+```sh
+make test-s3
+```
+
+This will also spin up a Minio S3 container.
+
+## Cast the vote
+
+Votes are cast by replying to the release candidate announcement email on the dev mailing list with either `+1`, `0`, or `-1`. For example:
+
+> \[ \] +1 Release this as PyIceberg 0.3.0
+> \[ \] +0
+> \[ \] -1 Do not release this because…
+
+In addition to your vote, it's customary to specify if your vote is binding or non-binding. Only members of the Project Management Committee have formally binding votes. If you're unsure, you can specify that your vote is non-binding. To read more about voting in the Apache framework, check out the [Voting](https://www.apache.org/foundation/voting.html) information page on the Apache Foundation's website.
diff --git a/mkdocs/gen_doc_stubs.py b/mkdocs/gen_doc_stubs.py
new file mode 100644
index 0000000000..bd3b128101
--- /dev/null
+++ b/mkdocs/gen_doc_stubs.py
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from pathlib import Path
+
+import griffe  # type: ignore
+import mkdocs_gen_files  # type: ignore
+
+nav = mkdocs_gen_files.Nav()
+
+root = Path(__file__).parent.parent
+src_root = root.joinpath("pyiceberg")
+
+data = griffe.load(src_root)
+for path in sorted(src_root.glob("**/*.py")):
+    module_path = path.relative_to(root).with_suffix("")
+    doc_path = path.relative_to(root).with_suffix(".md")
+    full_doc_path = Path("reference", doc_path)
+
+    parts = tuple(module_path.parts)
+
+    if parts[-1] == "__init__":
+        parts = parts[:-1]
+        doc_path = doc_path.with_name("index.md")
+        full_doc_path = full_doc_path.with_name("index.md")
+    elif parts[-1].startswith("_"):
+        continue
+
+    if module_path.parts[1:] in data.members and not data[module_path.parts[1:]].has_docstrings:
+        continue
+
+    nav[parts] = doc_path.as_posix()
+
+    with mkdocs_gen_files.open(full_doc_path, "w") as fd:
+        ident = ".".join(parts)
+        fd.write(f"::: {ident}")
+
+    mkdocs_gen_files.set_edit_path(full_doc_path, Path("../") / path)
+
+with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file:
+    nav_file.writelines(nav.build_literate_nav())
diff --git a/mkdocs/mkdocs.yml b/mkdocs/mkdocs.yml
new file mode 100644
index 0000000000..5f35129c6b
--- /dev/null
+++ b/mkdocs/mkdocs.yml
@@ -0,0 +1,60 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +--- +site_name: PyIceberg +site_url: https://py.iceberg.apache.org/ +repo_url: "https://github.com/apache/iceberg/tree/master/python" +repo_name: "apache/iceberg/python" + +plugins: + - gen-files: + scripts: + - gen_doc_stubs.py + - literate-nav: + nav_file: SUMMARY.md + - search + - section-index + - mkdocstrings: + handlers: + python: + paths: [..] + +theme: + name: material + logo: assets/images/iceberg-logo-icon.png + favicon: assets/images/iceberg-logo-icon.png + font: + text: Lato + features: + - navigation.top + - navigation.tracking + - navigation.tabs + - navigation.tabs.sticky + palette: + - scheme: default + toggle: + icon: material/brightness-7 + name: Switch to dark mode + - scheme: slate + toggle: + icon: material/brightness-4 + name: Switch to light mode +markdown_extensions: + - admonition + - pymdownx.highlight: + anchor_linenums: true + - pymdownx.superfences diff --git a/mkdocs/requirements.txt b/mkdocs/requirements.txt new file mode 100644 index 0000000000..baa107f325 --- /dev/null +++ b/mkdocs/requirements.txt @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +mkdocs==1.5.3 +griffe==0.36.2 +jinja2==3.1.2 +mkdocstrings==0.23.0 +mkdocstrings-python==1.7.0 +mkdocs-literate-nav==0.6.1 +mkdocs-autorefs==0.5.0 +mkdocs-gen-files==0.5.0 +mkdocs-material==9.4.1 +mkdocs-material-extensions==1.2 +mkdocs-section-index==0.3.7 diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000000..00b93756e5 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,3588 @@ +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. 
+ +[[package]] +name = "adlfs" +version = "2023.9.0" +description = "Access Azure Datalake Gen1 with fsspec and dask" +optional = true +python-versions = ">=3.8" +files = [ + {file = "adlfs-2023.9.0-py3-none-any.whl", hash = "sha256:e2cff62b8128578c6d1b9da1660ad4c8a5a8cb0d491bba416b529563c65dc5d2"}, + {file = "adlfs-2023.9.0.tar.gz", hash = "sha256:1ce70ffa39f7cffc3efbbd9f79b444958eb5d9de9981442b06e47472d2089d4b"}, +] + +[package.dependencies] +aiohttp = ">=3.7.0" +azure-core = ">=1.23.1,<2.0.0" +azure-datalake-store = ">=0.0.46,<0.1" +azure-identity = "*" +azure-storage-blob = ">=12.12.0" +fsspec = ">=2023.9.0" + +[package.extras] +docs = ["furo", "myst-parser", "numpydoc", "sphinx"] + +[[package]] +name = "aiobotocore" +version = "2.5.4" +description = "Async client for aws services using botocore and aiohttp" +optional = true +python-versions = ">=3.7" +files = [ + {file = "aiobotocore-2.5.4-py3-none-any.whl", hash = "sha256:4b32218728ca3d0be83835b604603a0cd6c329066e884bb78149334267f92440"}, + {file = "aiobotocore-2.5.4.tar.gz", hash = "sha256:60341f19eda77e41e1ab11eef171b5a98b5dbdb90804f5334b6f90e560e31fae"}, +] + +[package.dependencies] +aiohttp = ">=3.3.1,<4.0.0" +aioitertools = ">=0.5.1,<1.0.0" +botocore = ">=1.31.17,<1.31.18" +wrapt = ">=1.10.10,<2.0.0" + +[package.extras] +awscli = ["awscli (>=1.29.17,<1.29.18)"] +boto3 = ["boto3 (>=1.28.17,<1.28.18)"] + +[[package]] +name = "aiohttp" +version = "3.8.5" +description = "Async http client/server framework (asyncio)" +optional = true +python-versions = ">=3.6" +files = [ + {file = "aiohttp-3.8.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a94159871304770da4dd371f4291b20cac04e8c94f11bdea1c3478e557fbe0d8"}, + {file = "aiohttp-3.8.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:13bf85afc99ce6f9ee3567b04501f18f9f8dbbb2ea11ed1a2e079670403a7c84"}, + {file = "aiohttp-3.8.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ce2ac5708501afc4847221a521f7e4b245abf5178cf5ddae9d5b3856ddb2f3a"}, + {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96943e5dcc37a6529d18766597c491798b7eb7a61d48878611298afc1fca946c"}, + {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ad5c3c4590bb3cc28b4382f031f3783f25ec223557124c68754a2231d989e2b"}, + {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c413c633d0512df4dc7fd2373ec06cc6a815b7b6d6c2f208ada7e9e93a5061d"}, + {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df72ac063b97837a80d80dec8d54c241af059cc9bb42c4de68bd5b61ceb37caa"}, + {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c48c5c0271149cfe467c0ff8eb941279fd6e3f65c9a388c984e0e6cf57538e14"}, + {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:368a42363c4d70ab52c2c6420a57f190ed3dfaca6a1b19afda8165ee16416a82"}, + {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7607ec3ce4993464368505888af5beb446845a014bc676d349efec0e05085905"}, + {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0d21c684808288a98914e5aaf2a7c6a3179d4df11d249799c32d1808e79503b5"}, + {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:312fcfbacc7880a8da0ae8b6abc6cc7d752e9caa0051a53d217a650b25e9a691"}, + {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:ad093e823df03bb3fd37e7dec9d4670c34f9e24aeace76808fc20a507cace825"}, + {file = "aiohttp-3.8.5-cp310-cp310-win32.whl", hash = "sha256:33279701c04351a2914e1100b62b2a7fdb9a25995c4a104259f9a5ead7ed4802"}, + {file = "aiohttp-3.8.5-cp310-cp310-win_amd64.whl", hash = "sha256:6e4a280e4b975a2e7745573e3fc9c9ba0d1194a3738ce1cbaa80626cc9b4f4df"}, + {file = "aiohttp-3.8.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae871a964e1987a943d83d6709d20ec6103ca1eaf52f7e0d36ee1b5bebb8b9b9"}, + {file = "aiohttp-3.8.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:461908b2578955045efde733719d62f2b649c404189a09a632d245b445c9c975"}, + {file = "aiohttp-3.8.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:72a860c215e26192379f57cae5ab12b168b75db8271f111019509a1196dfc780"}, + {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc14be025665dba6202b6a71cfcdb53210cc498e50068bc088076624471f8bb9"}, + {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8af740fc2711ad85f1a5c034a435782fbd5b5f8314c9a3ef071424a8158d7f6b"}, + {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:841cd8233cbd2111a0ef0a522ce016357c5e3aff8a8ce92bcfa14cef890d698f"}, + {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed1c46fb119f1b59304b5ec89f834f07124cd23ae5b74288e364477641060ff"}, + {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84f8ae3e09a34f35c18fa57f015cc394bd1389bce02503fb30c394d04ee6b938"}, + {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62360cb771707cb70a6fd114b9871d20d7dd2163a0feafe43fd115cfe4fe845e"}, + {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:23fb25a9f0a1ca1f24c0a371523546366bb642397c94ab45ad3aedf2941cec6a"}, + {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0ba0d15164eae3d878260d4c4df859bbdc6466e9e6689c344a13334f988bb53"}, + {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5d20003b635fc6ae3f96d7260281dfaf1894fc3aa24d1888a9b2628e97c241e5"}, + {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0175d745d9e85c40dcc51c8f88c74bfbaef9e7afeeeb9d03c37977270303064c"}, + {file = "aiohttp-3.8.5-cp311-cp311-win32.whl", hash = "sha256:2e1b1e51b0774408f091d268648e3d57f7260c1682e7d3a63cb00d22d71bb945"}, + {file = "aiohttp-3.8.5-cp311-cp311-win_amd64.whl", hash = "sha256:043d2299f6dfdc92f0ac5e995dfc56668e1587cea7f9aa9d8a78a1b6554e5755"}, + {file = "aiohttp-3.8.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cae533195e8122584ec87531d6df000ad07737eaa3c81209e85c928854d2195c"}, + {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f21e83f355643c345177a5d1d8079f9f28b5133bcd154193b799d380331d5d3"}, + {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a75ef35f2df54ad55dbf4b73fe1da96f370e51b10c91f08b19603c64004acc"}, + {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e2e9839e14dd5308ee773c97115f1e0a1cb1d75cbeeee9f33824fa5144c7634"}, + {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44e65da1de4403d0576473e2344828ef9c4c6244d65cf4b75549bb46d40b8dd"}, + {file = 
"aiohttp-3.8.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d847e4cde6ecc19125ccbc9bfac4a7ab37c234dd88fbb3c5c524e8e14da543"}, + {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:c7a815258e5895d8900aec4454f38dca9aed71085f227537208057853f9d13f2"}, + {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:8b929b9bd7cd7c3939f8bcfffa92fae7480bd1aa425279d51a89327d600c704d"}, + {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:5db3a5b833764280ed7618393832e0853e40f3d3e9aa128ac0ba0f8278d08649"}, + {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:a0215ce6041d501f3155dc219712bc41252d0ab76474615b9700d63d4d9292af"}, + {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:fd1ed388ea7fbed22c4968dd64bab0198de60750a25fe8c0c9d4bef5abe13824"}, + {file = "aiohttp-3.8.5-cp36-cp36m-win32.whl", hash = "sha256:6e6783bcc45f397fdebc118d772103d751b54cddf5b60fbcc958382d7dd64f3e"}, + {file = "aiohttp-3.8.5-cp36-cp36m-win_amd64.whl", hash = "sha256:b5411d82cddd212644cf9360879eb5080f0d5f7d809d03262c50dad02f01421a"}, + {file = "aiohttp-3.8.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:01d4c0c874aa4ddfb8098e85d10b5e875a70adc63db91f1ae65a4b04d3344cda"}, + {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5980a746d547a6ba173fd5ee85ce9077e72d118758db05d229044b469d9029a"}, + {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a482e6da906d5e6e653be079b29bc173a48e381600161c9932d89dfae5942ef"}, + {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80bd372b8d0715c66c974cf57fe363621a02f359f1ec81cba97366948c7fc873"}, + {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1161b345c0a444ebcf46bf0a740ba5dcf50612fd3d0528883fdc0eff578006a"}, + {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd56db019015b6acfaaf92e1ac40eb8434847d9bf88b4be4efe5bfd260aee692"}, + {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:153c2549f6c004d2754cc60603d4668899c9895b8a89397444a9c4efa282aaf4"}, + {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4a01951fabc4ce26ab791da5f3f24dca6d9a6f24121746eb19756416ff2d881b"}, + {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bfb9162dcf01f615462b995a516ba03e769de0789de1cadc0f916265c257e5d8"}, + {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:7dde0009408969a43b04c16cbbe252c4f5ef4574ac226bc8815cd7342d2028b6"}, + {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4149d34c32f9638f38f544b3977a4c24052042affa895352d3636fa8bffd030a"}, + {file = "aiohttp-3.8.5-cp37-cp37m-win32.whl", hash = "sha256:68c5a82c8779bdfc6367c967a4a1b2aa52cd3595388bf5961a62158ee8a59e22"}, + {file = "aiohttp-3.8.5-cp37-cp37m-win_amd64.whl", hash = "sha256:2cf57fb50be5f52bda004b8893e63b48530ed9f0d6c96c84620dc92fe3cd9b9d"}, + {file = "aiohttp-3.8.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:eca4bf3734c541dc4f374ad6010a68ff6c6748f00451707f39857f429ca36ced"}, + {file = "aiohttp-3.8.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1274477e4c71ce8cfe6c1ec2f806d57c015ebf84d83373676036e256bc55d690"}, + {file = "aiohttp-3.8.5-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:28c543e54710d6158fc6f439296c7865b29e0b616629767e685a7185fab4a6b9"}, + {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910bec0c49637d213f5d9877105d26e0c4a4de2f8b1b29405ff37e9fc0ad52b8"}, + {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5443910d662db951b2e58eb70b0fbe6b6e2ae613477129a5805d0b66c54b6cb7"}, + {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e460be6978fc24e3df83193dc0cc4de46c9909ed92dd47d349a452ef49325b7"}, + {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb1558def481d84f03b45888473fc5a1f35747b5f334ef4e7a571bc0dfcb11f8"}, + {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34dd0c107799dcbbf7d48b53be761a013c0adf5571bf50c4ecad5643fe9cfcd0"}, + {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aa1990247f02a54185dc0dff92a6904521172a22664c863a03ff64c42f9b5410"}, + {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0e584a10f204a617d71d359fe383406305a4b595b333721fa50b867b4a0a1548"}, + {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a3cf433f127efa43fee6b90ea4c6edf6c4a17109d1d037d1a52abec84d8f2e42"}, + {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:c11f5b099adafb18e65c2c997d57108b5bbeaa9eeee64a84302c0978b1ec948b"}, + {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:84de26ddf621d7ac4c975dbea4c945860e08cccde492269db4e1538a6a6f3c35"}, + {file = "aiohttp-3.8.5-cp38-cp38-win32.whl", hash = "sha256:ab88bafedc57dd0aab55fa728ea10c1911f7e4d8b43e1d838a1739f33712921c"}, + {file = "aiohttp-3.8.5-cp38-cp38-win_amd64.whl", hash = "sha256:5798a9aad1879f626589f3df0f8b79b3608a92e9beab10e5fda02c8a2c60db2e"}, + {file = "aiohttp-3.8.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a6ce61195c6a19c785df04e71a4537e29eaa2c50fe745b732aa937c0c77169f3"}, + {file = "aiohttp-3.8.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:773dd01706d4db536335fcfae6ea2440a70ceb03dd3e7378f3e815b03c97ab51"}, + {file = "aiohttp-3.8.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f83a552443a526ea38d064588613aca983d0ee0038801bc93c0c916428310c28"}, + {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f7372f7341fcc16f57b2caded43e81ddd18df53320b6f9f042acad41f8e049a"}, + {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea353162f249c8097ea63c2169dd1aa55de1e8fecbe63412a9bc50816e87b761"}, + {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d47ae48db0b2dcf70bc8a3bc72b3de86e2a590fc299fdbbb15af320d2659de"}, + {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d827176898a2b0b09694fbd1088c7a31836d1a505c243811c87ae53a3f6273c1"}, + {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3562b06567c06439d8b447037bb655ef69786c590b1de86c7ab81efe1c9c15d8"}, + {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4e874cbf8caf8959d2adf572a78bba17cb0e9d7e51bb83d86a3697b686a0ab4d"}, + {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6809a00deaf3810e38c628e9a33271892f815b853605a936e2e9e5129762356c"}, + {file = 
"aiohttp-3.8.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:33776e945d89b29251b33a7e7d006ce86447b2cfd66db5e5ded4e5cd0340585c"}, + {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eaeed7abfb5d64c539e2db173f63631455f1196c37d9d8d873fc316470dfbacd"}, + {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e91d635961bec2d8f19dfeb41a539eb94bd073f075ca6dae6c8dc0ee89ad6f91"}, + {file = "aiohttp-3.8.5-cp39-cp39-win32.whl", hash = "sha256:00ad4b6f185ec67f3e6562e8a1d2b69660be43070bd0ef6fcec5211154c7df67"}, + {file = "aiohttp-3.8.5-cp39-cp39-win_amd64.whl", hash = "sha256:c0a9034379a37ae42dea7ac1e048352d96286626251862e448933c0f59cbd79c"}, + {file = "aiohttp-3.8.5.tar.gz", hash = "sha256:b9552ec52cc147dbf1944ac7ac98af7602e51ea2dcd076ed194ca3c0d1c7d0bc"}, +] + +[package.dependencies] +aiosignal = ">=1.1.2" +async-timeout = ">=4.0.0a3,<5.0" +attrs = ">=17.3.0" +charset-normalizer = ">=2.0,<4.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns", "cchardet"] + +[[package]] +name = "aioitertools" +version = "0.11.0" +description = "itertools and builtins for AsyncIO and mixed iterables" +optional = true +python-versions = ">=3.6" +files = [ + {file = "aioitertools-0.11.0-py3-none-any.whl", hash = "sha256:04b95e3dab25b449def24d7df809411c10e62aab0cbe31a50ca4e68748c43394"}, + {file = "aioitertools-0.11.0.tar.gz", hash = "sha256:42c68b8dd3a69c2bf7f2233bf7df4bb58b557bca5252ac02ed5187bbc67d6831"}, +] + +[package.dependencies] +typing_extensions = {version = ">=4.0", markers = "python_version < \"3.10\""} + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = true +python-versions = ">=3.7" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "annotated-types" +version = "0.5.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.7" +files = [ + {file = "annotated_types-0.5.0-py3-none-any.whl", hash = "sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd"}, + {file = "annotated_types-0.5.0.tar.gz", hash = "sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "async-timeout" +version = "4.0.3" +description = "Timeout context manager for asyncio programs" +optional = true +python-versions = ">=3.7" +files = [ + {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, + {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, +] + +[[package]] +name = "attrs" +version = "23.1.0" +description = "Classes Without Boilerplate" +optional = true +python-versions = ">=3.7" +files = [ + {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, +] + +[package.extras] +cov = ["attrs[tests]", 
"coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] + +[[package]] +name = "azure-core" +version = "1.29.4" +description = "Microsoft Azure Core Library for Python" +optional = true +python-versions = ">=3.7" +files = [ + {file = "azure-core-1.29.4.tar.gz", hash = "sha256:500b3aa9bf2e90c5ccc88bb105d056114ca0ce7d0ce73afb8bc4d714b2fc7568"}, + {file = "azure_core-1.29.4-py3-none-any.whl", hash = "sha256:b03261bcba22c0b9290faf9999cedd23e849ed2577feee90515694cea6bc74bf"}, +] + +[package.dependencies] +requests = ">=2.18.4" +six = ">=1.11.0" +typing-extensions = ">=4.6.0" + +[package.extras] +aio = ["aiohttp (>=3.0)"] + +[[package]] +name = "azure-datalake-store" +version = "0.0.53" +description = "Azure Data Lake Store Filesystem Client Library for Python" +optional = true +python-versions = "*" +files = [ + {file = "azure-datalake-store-0.0.53.tar.gz", hash = "sha256:05b6de62ee3f2a0a6e6941e6933b792b800c3e7f6ffce2fc324bc19875757393"}, + {file = "azure_datalake_store-0.0.53-py2.py3-none-any.whl", hash = "sha256:a30c902a6e360aa47d7f69f086b426729784e71c536f330b691647a51dc42b2b"}, +] + +[package.dependencies] +cffi = "*" +msal = ">=1.16.0,<2" +requests = ">=2.20.0" + +[[package]] +name = "azure-identity" +version = "1.14.0" +description = "Microsoft Azure Identity Library for Python" +optional = true +python-versions = ">=3.7" +files = [ + {file = "azure-identity-1.14.0.zip", hash = "sha256:72441799f8c5c89bfe21026965e266672a7c5d050c2c65119ef899dd5362e2b1"}, + {file = "azure_identity-1.14.0-py3-none-any.whl", hash = "sha256:edabf0e010eb85760e1dd19424d5e8f97ba2c9caff73a16e7b30ccbdbcce369b"}, +] + +[package.dependencies] +azure-core = ">=1.11.0,<2.0.0" +cryptography = ">=2.5" +msal = ">=1.20.0,<2.0.0" +msal-extensions = ">=0.3.0,<2.0.0" + +[[package]] +name = "azure-storage-blob" +version = "12.18.1" +description = "Microsoft Azure Blob Storage Client Library for Python" +optional = true +python-versions = ">=3.7" +files = [ + {file = "azure-storage-blob-12.18.1.tar.gz", hash = "sha256:d3265c2403c28d8881326c365e9cf7ed2ad55fdac98404eae753548702b31ba2"}, + {file = "azure_storage_blob-12.18.1-py3-none-any.whl", hash = "sha256:00b92568e91d608c04dfd4814c3b180818e690023493bb984c22dfc1a8a96e55"}, +] + +[package.dependencies] +azure-core = ">=1.28.0,<2.0.0" +cryptography = ">=2.1.4" +isodate = ">=0.6.1" +typing-extensions = ">=4.3.0" + +[package.extras] +aio = ["azure-core[aio] (>=1.28.0,<2.0.0)"] + +[[package]] +name = "boto3" +version = "1.28.17" +description = "The AWS SDK for Python" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "boto3-1.28.17-py3-none-any.whl", hash = "sha256:bca0526f819e0f19c0f1e6eba3e2d1d6b6a92a45129f98c0d716e5aab6d9444b"}, + {file = "boto3-1.28.17.tar.gz", hash = "sha256:90f7cfb5e1821af95b1fc084bc50e6c47fa3edc99f32de1a2591faa0c546bea7"}, +] + +[package.dependencies] +botocore = ">=1.31.17,<1.32.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = ">=0.6.0,<0.7.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "botocore" +version = "1.31.17" +description = "Low-level, data-driven core of boto 3." 
+optional = false +python-versions = ">= 3.7" +files = [ + {file = "botocore-1.31.17-py3-none-any.whl", hash = "sha256:6ac34a1d34aa3750e78b77b8596617e2bab938964694d651939dba2cbde2c12b"}, + {file = "botocore-1.31.17.tar.gz", hash = "sha256:396459065dba4339eb4da4ec8b4e6599728eb89b7caaceea199e26f7d824a41c"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = ">=1.25.4,<1.27" + +[package.extras] +crt = ["awscrt (==0.16.26)"] + +[[package]] +name = "build" +version = "1.0.3" +description = "A simple, correct Python build frontend" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "build-1.0.3-py3-none-any.whl", hash = "sha256:589bf99a67df7c9cf07ec0ac0e5e2ea5d4b37ac63301c4986d1acb126aa83f8f"}, + {file = "build-1.0.3.tar.gz", hash = "sha256:538aab1b64f9828977f84bc63ae570b060a8ed1be419e7870b8b4fc5e6ea553b"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "os_name == \"nt\""} +importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""} +packaging = ">=19.0" +pyproject_hooks = "*" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} + +[package.extras] +docs = ["furo (>=2023.08.17)", "sphinx (>=7.0,<8.0)", "sphinx-argparse-cli (>=1.5)", "sphinx-autodoc-typehints (>=1.10)", "sphinx-issues (>=3.0.0)"] +test = ["filelock (>=3)", "pytest (>=6.2.4)", "pytest-cov (>=2.12)", "pytest-mock (>=2)", "pytest-rerunfailures (>=9.1)", "pytest-xdist (>=1.34)", "setuptools (>=42.0.0)", "setuptools (>=56.0.0)", "setuptools (>=56.0.0)", "setuptools (>=67.8.0)", "wheel (>=0.36.0)"] +typing = ["importlib-metadata (>=5.1)", "mypy (>=1.5.0,<1.6.0)", "tomli", "typing-extensions (>=3.7.4.3)"] +virtualenv = ["virtualenv (>=20.0.35)"] + +[[package]] +name = "cachetools" +version = "5.3.1" +description = "Extensible memoizing collections and decorators" +optional = true +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.3.1-py3-none-any.whl", hash = "sha256:95ef631eeaea14ba2e36f06437f36463aac3a096799e876ee55e5cdccb102590"}, + {file = "cachetools-5.3.1.tar.gz", hash = "sha256:dce83f2d9b4e1f732a8cd44af8e8fab2dbe46201467fc98b3ef8f269092bf62b"}, +] + +[[package]] +name = "certifi" +version = "2023.7.22" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, + {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, +] + +[[package]] +name = "cffi" +version = "1.15.1" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = "*" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + 
{file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + 
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "cfgv" +version = "3.4.0" +description = "Validate configuration and produce human readable error messages." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, + {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.2.0" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"}, + {file = 
"charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"}, + {file = 
"charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"}, + {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"}, +] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "coverage" +version = "7.3.1" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "coverage-7.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cd0f7429ecfd1ff597389907045ff209c8fdb5b013d38cfa7c60728cb484b6e3"}, + {file = "coverage-7.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:966f10df9b2b2115da87f50f6a248e313c72a668248be1b9060ce935c871f276"}, + {file = "coverage-7.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0575c37e207bb9b98b6cf72fdaaa18ac909fb3d153083400c2d48e2e6d28bd8e"}, + {file = "coverage-7.3.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:245c5a99254e83875c7fed8b8b2536f040997a9b76ac4c1da5bff398c06e860f"}, + {file = "coverage-7.3.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c96dd7798d83b960afc6c1feb9e5af537fc4908852ef025600374ff1a017392"}, + {file = "coverage-7.3.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:de30c1aa80f30af0f6b2058a91505ea6e36d6535d437520067f525f7df123887"}, + {file = "coverage-7.3.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:50dd1e2dd13dbbd856ffef69196781edff26c800a74f070d3b3e3389cab2600d"}, + {file = "coverage-7.3.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9c0c19f70d30219113b18fe07e372b244fb2a773d4afde29d5a2f7930765136"}, + {file = "coverage-7.3.1-cp310-cp310-win32.whl", hash = "sha256:770f143980cc16eb601ccfd571846e89a5fe4c03b4193f2e485268f224ab602f"}, + {file = "coverage-7.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:cdd088c00c39a27cfa5329349cc763a48761fdc785879220d54eb785c8a38520"}, + {file = "coverage-7.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:74bb470399dc1989b535cb41f5ca7ab2af561e40def22d7e188e0a445e7639e3"}, + {file = "coverage-7.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:025ded371f1ca280c035d91b43252adbb04d2aea4c7105252d3cbc227f03b375"}, + {file = "coverage-7.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6191b3a6ad3e09b6cfd75b45c6aeeffe7e3b0ad46b268345d159b8df8d835f9"}, + {file = "coverage-7.3.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7eb0b188f30e41ddd659a529e385470aa6782f3b412f860ce22b2491c89b8593"}, + {file = "coverage-7.3.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c8f0df9dfd8ff745bccff75867d63ef336e57cc22b2908ee725cc552689ec8"}, + {file = "coverage-7.3.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7eb3cd48d54b9bd0e73026dedce44773214064be93611deab0b6a43158c3d5a0"}, + {file = "coverage-7.3.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ac3c5b7e75acac31e490b7851595212ed951889918d398b7afa12736c85e13ce"}, + {file = "coverage-7.3.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b4ee7080878077af0afa7238df1b967f00dc10763f6e1b66f5cced4abebb0a3"}, + {file = "coverage-7.3.1-cp311-cp311-win32.whl", hash = 
"sha256:229c0dd2ccf956bf5aeede7e3131ca48b65beacde2029f0361b54bf93d36f45a"}, + {file = "coverage-7.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:c6f55d38818ca9596dc9019eae19a47410d5322408140d9a0076001a3dcb938c"}, + {file = "coverage-7.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5289490dd1c3bb86de4730a92261ae66ea8d44b79ed3cc26464f4c2cde581fbc"}, + {file = "coverage-7.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ca833941ec701fda15414be400c3259479bfde7ae6d806b69e63b3dc423b1832"}, + {file = "coverage-7.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd694e19c031733e446c8024dedd12a00cda87e1c10bd7b8539a87963685e969"}, + {file = "coverage-7.3.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aab8e9464c00da5cb9c536150b7fbcd8850d376d1151741dd0d16dfe1ba4fd26"}, + {file = "coverage-7.3.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87d38444efffd5b056fcc026c1e8d862191881143c3aa80bb11fcf9dca9ae204"}, + {file = "coverage-7.3.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8a07b692129b8a14ad7a37941a3029c291254feb7a4237f245cfae2de78de037"}, + {file = "coverage-7.3.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:2829c65c8faaf55b868ed7af3c7477b76b1c6ebeee99a28f59a2cb5907a45760"}, + {file = "coverage-7.3.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1f111a7d85658ea52ffad7084088277135ec5f368457275fc57f11cebb15607f"}, + {file = "coverage-7.3.1-cp312-cp312-win32.whl", hash = "sha256:c397c70cd20f6df7d2a52283857af622d5f23300c4ca8e5bd8c7a543825baa5a"}, + {file = "coverage-7.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:5ae4c6da8b3d123500f9525b50bf0168023313963e0e2e814badf9000dd6ef92"}, + {file = "coverage-7.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca70466ca3a17460e8fc9cea7123c8cbef5ada4be3140a1ef8f7b63f2f37108f"}, + {file = "coverage-7.3.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f2781fd3cabc28278dc982a352f50c81c09a1a500cc2086dc4249853ea96b981"}, + {file = "coverage-7.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6407424621f40205bbe6325686417e5e552f6b2dba3535dd1f90afc88a61d465"}, + {file = "coverage-7.3.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:04312b036580ec505f2b77cbbdfb15137d5efdfade09156961f5277149f5e344"}, + {file = "coverage-7.3.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9ad38204887349853d7c313f53a7b1c210ce138c73859e925bc4e5d8fc18e7"}, + {file = "coverage-7.3.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:53669b79f3d599da95a0afbef039ac0fadbb236532feb042c534fbb81b1a4e40"}, + {file = "coverage-7.3.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:614f1f98b84eb256e4f35e726bfe5ca82349f8dfa576faabf8a49ca09e630086"}, + {file = "coverage-7.3.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f1a317fdf5c122ad642db8a97964733ab7c3cf6009e1a8ae8821089993f175ff"}, + {file = "coverage-7.3.1-cp38-cp38-win32.whl", hash = "sha256:defbbb51121189722420a208957e26e49809feafca6afeef325df66c39c4fdb3"}, + {file = "coverage-7.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:f4f456590eefb6e1b3c9ea6328c1e9fa0f1006e7481179d749b3376fc793478e"}, + {file = "coverage-7.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f12d8b11a54f32688b165fd1a788c408f927b0960984b899be7e4c190ae758f1"}, + {file = 
"coverage-7.3.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f09195dda68d94a53123883de75bb97b0e35f5f6f9f3aa5bf6e496da718f0cb6"}, + {file = "coverage-7.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6601a60318f9c3945be6ea0f2a80571f4299b6801716f8a6e4846892737ebe4"}, + {file = "coverage-7.3.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07d156269718670d00a3b06db2288b48527fc5f36859425ff7cec07c6b367745"}, + {file = "coverage-7.3.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:636a8ac0b044cfeccae76a36f3b18264edcc810a76a49884b96dd744613ec0b7"}, + {file = "coverage-7.3.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5d991e13ad2ed3aced177f524e4d670f304c8233edad3210e02c465351f785a0"}, + {file = "coverage-7.3.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:586649ada7cf139445da386ab6f8ef00e6172f11a939fc3b2b7e7c9082052fa0"}, + {file = "coverage-7.3.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4aba512a15a3e1e4fdbfed2f5392ec221434a614cc68100ca99dcad7af29f3f8"}, + {file = "coverage-7.3.1-cp39-cp39-win32.whl", hash = "sha256:6bc6f3f4692d806831c136c5acad5ccedd0262aa44c087c46b7101c77e139140"}, + {file = "coverage-7.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:553d7094cb27db58ea91332e8b5681bac107e7242c23f7629ab1316ee73c4981"}, + {file = "coverage-7.3.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:220eb51f5fb38dfdb7e5d54284ca4d0cd70ddac047d750111a68ab1798945194"}, + {file = "coverage-7.3.1.tar.gz", hash = "sha256:6cb7fe1581deb67b782c153136541e20901aa312ceedaf1467dcb35255787952"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + +[[package]] +name = "cryptography" +version = "41.0.4" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839"}, + {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f"}, + {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714"}, + {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb"}, + {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13"}, + {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143"}, + {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397"}, + {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860"}, + {file = "cryptography-41.0.4-cp37-abi3-win32.whl", hash = "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd"}, + {file = "cryptography-41.0.4-cp37-abi3-win_amd64.whl", hash = "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d"}, + {file = "cryptography-41.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67"}, + {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e"}, + {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829"}, + {file = "cryptography-41.0.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca"}, + {file = "cryptography-41.0.4-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d"}, + {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac"}, + {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9"}, + {file = "cryptography-41.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f"}, + {file = "cryptography-41.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91"}, + {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8"}, + {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6"}, + {file = "cryptography-41.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311"}, + {file = "cryptography-41.0.4.tar.gz", hash = 
"sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a"}, +] + +[package.dependencies] +cffi = ">=1.12" + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] +nox = ["nox"] +pep8test = ["black", "check-sdist", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "cython" +version = "3.0.2" +description = "The Cython compiler for writing C extensions in the Python language." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "Cython-3.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8ccb91d2254e34724f1541b2a6fcdfacdb88284185b0097ae84e0ddf476c7a38"}, + {file = "Cython-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c298b1589205ecaaed0457ad05e0c8a43e7db2053607f48ed4a899cb6aa114df"}, + {file = "Cython-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e825e682cef76d0c33384f38b56b7e87c76152482a914dfc78faed6ff66ce05a"}, + {file = "Cython-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:77ec0134fc1b10aebef2013936a91c07bff2498ec283bc2eca099ee0cb94d12e"}, + {file = "Cython-3.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c90eeb94395315e65fd758a2f86b92904fce7b50060b4d45a878ef6767f9276e"}, + {file = "Cython-3.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:38085523fa7a299638d051ae08144222785639882f6291bd275c0b12db1034ff"}, + {file = "Cython-3.0.2-cp310-cp310-win32.whl", hash = "sha256:b032cb0c69082f0665b2c5fb416d041157062f1538336d0edf823b9ee500e39c"}, + {file = "Cython-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:067b2b9eb487bd61367b296f11b7c1c70a084b3eb7d5a572f607cd1fc5ca5586"}, + {file = "Cython-3.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:213ff9f95de319e54b520bf31edd6aa7a1fa4fbf617c2beb0f92362595e6476a"}, + {file = "Cython-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4bebbca13078125a35937966137af4bd0300a0c66fd7ae4ce36adc049b13bdf3"}, + {file = "Cython-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e5587128e8c2423aefcffa4ded4ddf60d44898938fbb7c0f236636a750a94f"}, + {file = "Cython-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78e2853d484643c6b7ac3bdb48392753442da1c71b689468fa3176b619bebe54"}, + {file = "Cython-3.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e722732e9aa9bde667ed6d87525234823eb7766ca234cfb19d7e0c095a2ef4"}, + {file = "Cython-3.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:989787fc24a95100a26918b6577d06e15a8868a3ed267009c5cfcf1a906179ac"}, + {file = "Cython-3.0.2-cp311-cp311-win32.whl", hash = "sha256:d21801981db44b7e9f9768f121317946461d56b51de1e6eff3c42e8914048696"}, + {file = "Cython-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:809617cf4825b2138ce0ec827e1f28e39668743c81ac8286373f8d148c05f088"}, + {file = "Cython-3.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5682293d344b7dbad97ce6eceb9e887aca6e53499709db9da726ca3424e5559d"}, + {file = "Cython-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:7e08ff5da5f5b969639784b1bffcd880a0c0f048d182aed7cba9945ee8b367c2"}, + {file = "Cython-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8850269ff59f77a1629e26d0576701925360d732011d6d3516ccdc5b2c2bc310"}, + {file = "Cython-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:550b3fbe9b3c555b44ded934f4822f9fcc04dfcee512167ebcbbd370ccede20e"}, + {file = "Cython-3.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4db017b104f47b1185237702f6ed2651839c8124614683efa7c489f3fa4e19d9"}, + {file = "Cython-3.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:75a2395cc7b78cff59be6e9b7f92bbb5d7b8d25203f6d3fb6f72bdb7d3f49777"}, + {file = "Cython-3.0.2-cp312-cp312-win32.whl", hash = "sha256:786b6034a91e886116bb562fe42f8bf0f97c3e00c02e56791d02675959ed65b1"}, + {file = "Cython-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc9d173ab8b167cae674f6deed8c65ba816574797a2bd6d8aa623277d1fa81ca"}, + {file = "Cython-3.0.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8948504338d7a140ce588333177dcabf0743a68dbc83b0174f214f5b959634d5"}, + {file = "Cython-3.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a51efba0e136b2af358e5a347bae09678b17460c35cf1eab24f0476820348991"}, + {file = "Cython-3.0.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05cb2a73810f045d328b7579cf98f550a9e601df5e282d1fea0512d8ad589011"}, + {file = "Cython-3.0.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22ba78e48bdb65977928ecb275ac8c82df7b0eefa075078a1363a5af4606b42e"}, + {file = "Cython-3.0.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:302281b927409b3e0ef8cd9251eab782cf1acd2578eab305519fbae5d184b7e9"}, + {file = "Cython-3.0.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:a1c3675394b81024aaf56e4f53c2b4f81d9a116c7049e9d4706f810899c9134e"}, + {file = "Cython-3.0.2-cp36-cp36m-win32.whl", hash = "sha256:34f7b014ebce5d325c8084e396c81cdafbd8d82be56780dffe6b67b28c891f1b"}, + {file = "Cython-3.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:477cd3549597f09a1608da7b05e16ba641e9aedd171b868533a5a07790ed886f"}, + {file = "Cython-3.0.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a49dde9f9e29ea82f29aaf3bb1a270b6eb90b75d627c7ff2f5dd3764540ae646"}, + {file = "Cython-3.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc1c8013fad0933f5201186eccc5f2be223cafd6a8dcd586d3f7bb6ba84dc845"}, + {file = "Cython-3.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b75e9c9d7ad7c9dd85d45241d1d4e3c5f66079c1f84eec91689c26d98bc3349"}, + {file = "Cython-3.0.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f43c4d3ecd9e3b8b7afe834e519f55cf4249b1088f96d11b96f02c55cbaeff7"}, + {file = "Cython-3.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:dab6a923e21e212aa3dc6dde9b22a190f5d7c449315a94e57ddc019ea74a979b"}, + {file = "Cython-3.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ae453cfa933b919c0a19d2cc5dc9fb28486268e95dc2ab7a11ab7f99cf8c3883"}, + {file = "Cython-3.0.2-cp37-cp37m-win32.whl", hash = "sha256:b1f023d36a3829069ed11017c670128be3f135a9c17bd64c35d3b3442243b05c"}, + {file = "Cython-3.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:011c4e0b75baee1843334562487eb4fbc0c59ddb2cc32a978b972a81eedcbdcc"}, + {file = "Cython-3.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:832bbee87bca760efeae248ddf19ccd77f9a2355cb6f8a64f20cc377e56957b3"}, + {file = "Cython-3.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fe806d154b6b7f0ab746dac36c022889e2e7cf47546ff9afdc29a62cfa692d0"}, + {file = "Cython-3.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e486331a29e7700b1ad5f4f753bef483c81412a5e64a873df46d6cb66f9a65de"}, + {file = "Cython-3.0.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54d41a1dfbaab74449873e7f8e6cd4239850fe7a50f7f784dd99a560927f3bac"}, + {file = "Cython-3.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4dca13c86d6cd523c7d8bbf8db1b2bbf8faedd0addedb229158d8015ad1819e1"}, + {file = "Cython-3.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:10cbfb37f31938371a6213cc8b5459c639954aed053efeded3c012d4c5915db9"}, + {file = "Cython-3.0.2-cp38-cp38-win32.whl", hash = "sha256:e663c237579c033deaa2cb362b74651da7712f56e441c11382510a8c4c4f2dd7"}, + {file = "Cython-3.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:2f84bd6cefa5130750c492038170c44f1cbd6f42e9ed85e168fd9cb453f85160"}, + {file = "Cython-3.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f37e4287f520f3748a06ad5eaae09ba4ac68f52e155d70de5f75780d83575c43"}, + {file = "Cython-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd30826ca8b27b2955a63c8ffe8aacc9f0779582b4bd154cf7b441ac10dae2cb"}, + {file = "Cython-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08d67c7225a09eeb77e090c8d4f60677165b052ccf76e3a57d8237064e5c2de2"}, + {file = "Cython-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e625eec8c5c9a8cb062a318b257cc469d301bed952c7daf86e38bbd3afe7c91"}, + {file = "Cython-3.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1b12a8f23270675b537d1c3b988f845bea4bbcc66ae0468857f5ede0526d4522"}, + {file = "Cython-3.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:62dd78afdf748a58dae9c9b9c42a1519ae30787b28ce5f84a0e1bb54144142ca"}, + {file = "Cython-3.0.2-cp39-cp39-win32.whl", hash = "sha256:d0d0cc4ecc05f41c5e02af14ac0083552d22efed976f79eb7bade55fed63b25d"}, + {file = "Cython-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:147cc1d3dda8b06de9d86df5e59cdf15f0a522620168b7349a5ec88b48104d7d"}, + {file = "Cython-3.0.2-py2.py3-none-any.whl", hash = "sha256:8f1c9e4b8e413da211dd7942440cf410ff0eafb081309e04e81f4fafbb146bf2"}, + {file = "Cython-3.0.2.tar.gz", hash = "sha256:9594818dca8bb22ae6580c5222da2bc5cc32334350bd2d294a00d8669bcc61b5"}, +] + +[[package]] +name = "decorator" +version = "5.1.1" +description = "Decorators for Humans" +optional = true +python-versions = ">=3.5" +files = [ + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, +] + +[[package]] +name = "distlib" +version = "0.3.7" +description = "Distribution utilities" +optional = false +python-versions = "*" +files = [ + {file = "distlib-0.3.7-py2.py3-none-any.whl", hash = "sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057"}, + {file = "distlib-0.3.7.tar.gz", hash = "sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8"}, +] + +[[package]] +name = "docutils" +version = "0.20.1" +description = "Docutils -- Python Documentation Utilities" +optional = 
false +python-versions = ">=3.7" +files = [ + {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"}, + {file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"}, +] + +[[package]] +name = "duckdb" +version = "0.8.1" +description = "DuckDB embedded database" +optional = true +python-versions = "*" +files = [ + {file = "duckdb-0.8.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:14781d21580ee72aba1f5dcae7734674c9b6c078dd60470a08b2b420d15b996d"}, + {file = "duckdb-0.8.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f13bf7ab0e56ddd2014ef762ae4ee5ea4df5a69545ce1191b8d7df8118ba3167"}, + {file = "duckdb-0.8.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4032042d8363e55365bbca3faafc6dc336ed2aad088f10ae1a534ebc5bcc181"}, + {file = "duckdb-0.8.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31a71bd8f0b0ca77c27fa89b99349ef22599ffefe1e7684ae2e1aa2904a08684"}, + {file = "duckdb-0.8.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24568d6e48f3dbbf4a933109e323507a46b9399ed24c5d4388c4987ddc694fd0"}, + {file = "duckdb-0.8.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297226c0dadaa07f7c5ae7cbdb9adba9567db7b16693dbd1b406b739ce0d7924"}, + {file = "duckdb-0.8.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5792cf777ece2c0591194006b4d3e531f720186102492872cb32ddb9363919cf"}, + {file = "duckdb-0.8.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:12803f9f41582b68921d6b21f95ba7a51e1d8f36832b7d8006186f58c3d1b344"}, + {file = "duckdb-0.8.1-cp310-cp310-win32.whl", hash = "sha256:d0953d5a2355ddc49095e7aef1392b7f59c5be5cec8cdc98b9d9dc1f01e7ce2b"}, + {file = "duckdb-0.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:6e6583c98a7d6637e83bcadfbd86e1f183917ea539f23b6b41178f32f813a5eb"}, + {file = "duckdb-0.8.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fad7ed0d4415f633d955ac24717fa13a500012b600751d4edb050b75fb940c25"}, + {file = "duckdb-0.8.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:81ae602f34d38d9c48dd60f94b89f28df3ef346830978441b83c5b4eae131d08"}, + {file = "duckdb-0.8.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7d75cfe563aaa058d3b4ccaaa371c6271e00e3070df5de72361fd161b2fe6780"}, + {file = "duckdb-0.8.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dbb55e7a3336f2462e5e916fc128c47fe1c03b6208d6bd413ac11ed95132aa0"}, + {file = "duckdb-0.8.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a6df53efd63b6fdf04657385a791a4e3c4fb94bfd5db181c4843e2c46b04fef5"}, + {file = "duckdb-0.8.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b188b80b70d1159b17c9baaf541c1799c1ce8b2af4add179a9eed8e2616be96"}, + {file = "duckdb-0.8.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5ad481ee353f31250b45d64b4a104e53b21415577943aa8f84d0af266dc9af85"}, + {file = "duckdb-0.8.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1d1b1729993611b1892509d21c21628917625cdbe824a61ce891baadf684b32"}, + {file = "duckdb-0.8.1-cp311-cp311-win32.whl", hash = "sha256:2d8f9cc301e8455a4f89aa1088b8a2d628f0c1f158d4cf9bc78971ed88d82eea"}, + {file = "duckdb-0.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:07457a43605223f62d93d2a5a66b3f97731f79bbbe81fdd5b79954306122f612"}, + {file = "duckdb-0.8.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:d2c8062c3e978dbcd80d712ca3e307de8a06bd4f343aa457d7dd7294692a3842"}, + {file = "duckdb-0.8.1-cp36-cp36m-win32.whl", hash = "sha256:fad486c65ae944eae2de0d590a0a4fb91a9893df98411d66cab03359f9cba39b"}, + {file = "duckdb-0.8.1-cp36-cp36m-win_amd64.whl", hash = "sha256:86fa4506622c52d2df93089c8e7075f1c4d0ba56f4bf27faebde8725355edf32"}, + {file = "duckdb-0.8.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:60e07a62782f88420046e30cc0e3de842d0901c4fd5b8e4d28b73826ec0c3f5e"}, + {file = "duckdb-0.8.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f18563675977f8cbf03748efee0165b4c8ef64e0cbe48366f78e2914d82138bb"}, + {file = "duckdb-0.8.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16e179443832bea8439ae4dff93cf1e42c545144ead7a4ef5f473e373eea925a"}, + {file = "duckdb-0.8.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a413d5267cb41a1afe69d30dd6d4842c588256a6fed7554c7e07dad251ede095"}, + {file = "duckdb-0.8.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3784680df59eadd683b0a4c2375d451a64470ca54bd171c01e36951962b1d332"}, + {file = "duckdb-0.8.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:67a1725c2b01f9b53571ecf3f92959b652f60156c1c48fb35798302e39b3c1a2"}, + {file = "duckdb-0.8.1-cp37-cp37m-win32.whl", hash = "sha256:197d37e2588c5ad063e79819054eedb7550d43bf1a557d03ba8f8f67f71acc42"}, + {file = "duckdb-0.8.1-cp37-cp37m-win_amd64.whl", hash = "sha256:3843feb79edf100800f5037c32d5d5a5474fb94b32ace66c707b96605e7c16b2"}, + {file = "duckdb-0.8.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:624c889b0f2d656794757b3cc4fc58030d5e285f5ad2ef9fba1ea34a01dab7fb"}, + {file = "duckdb-0.8.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fcbe3742d77eb5add2d617d487266d825e663270ef90253366137a47eaab9448"}, + {file = "duckdb-0.8.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47516c9299d09e9dbba097b9fb339b389313c4941da5c54109df01df0f05e78c"}, + {file = "duckdb-0.8.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf1ba718b7522d34399446ebd5d4b9fcac0b56b6ac07bfebf618fd190ec37c1d"}, + {file = "duckdb-0.8.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e36e35d38a9ae798fe8cf6a839e81494d5b634af89f4ec9483f4d0a313fc6bdb"}, + {file = "duckdb-0.8.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23493313f88ce6e708a512daacad13e83e6d1ea0be204b175df1348f7fc78671"}, + {file = "duckdb-0.8.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1fb9bf0b6f63616c8a4b9a6a32789045e98c108df100e6bac783dc1e36073737"}, + {file = "duckdb-0.8.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:12fc13ecd5eddd28b203b9e3999040d3a7374a8f4b833b04bd26b8c5685c2635"}, + {file = "duckdb-0.8.1-cp38-cp38-win32.whl", hash = "sha256:a12bf4b18306c9cb2c9ba50520317e6cf2de861f121d6f0678505fa83468c627"}, + {file = "duckdb-0.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:e4e809358b9559c00caac4233e0e2014f3f55cd753a31c4bcbbd1b55ad0d35e4"}, + {file = "duckdb-0.8.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7acedfc00d97fbdb8c3d120418c41ef3cb86ef59367f3a9a30dff24470d38680"}, + {file = "duckdb-0.8.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:99bfe264059cdc1e318769103f656f98e819cd4e231cd76c1d1a0327f3e5cef8"}, + {file = "duckdb-0.8.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:538b225f361066231bc6cd66c04a5561de3eea56115a5dd773e99e5d47eb1b89"}, + {file = "duckdb-0.8.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ae0be3f71a18cd8492d05d0fc1bc67d01d5a9457b04822d025b0fc8ee6efe32e"}, + {file = "duckdb-0.8.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd82ba63b58672e46c8ec60bc9946aa4dd7b77f21c1ba09633d8847ad9eb0d7b"}, + {file = "duckdb-0.8.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:780a34559aaec8354e83aa4b7b31b3555f1b2cf75728bf5ce11b89a950f5cdd9"}, + {file = "duckdb-0.8.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:01f0d4e9f7103523672bda8d3f77f440b3e0155dd3b2f24997bc0c77f8deb460"}, + {file = "duckdb-0.8.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:31f692decb98c2d57891da27180201d9e93bb470a3051fcf413e8da65bca37a5"}, + {file = "duckdb-0.8.1-cp39-cp39-win32.whl", hash = "sha256:e7fe93449cd309bbc67d1bf6f6392a6118e94a9a4479ab8a80518742e855370a"}, + {file = "duckdb-0.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:81d670bc6807672f038332d9bf587037aabdd741b0810de191984325ed307abd"}, + {file = "duckdb-0.8.1.tar.gz", hash = "sha256:a54d37f4abc2afc4f92314aaa56ecf215a411f40af4bffe1e86bd25e62aceee9"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.1.3" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"}, + {file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "fastavro" +version = "1.8.3" +description = "Fast read/write of AVRO files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fastavro-1.8.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:0e7d98e27cfff61befa23b11422e72a9516fe571d87fd41a656074a958d1f5df"}, + {file = "fastavro-1.8.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60bfc8b6d3a3e27ae68ce952f9c7a63001dd82f96a519ff25d105a2b61b4bae9"}, + {file = "fastavro-1.8.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d107cde443d92be8b65007ef304a602702853925c4b9ce63b66b8cdf04938af0"}, + {file = "fastavro-1.8.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4bf02deeea731910d55e24f3b44a848007b600ddd0b8861dab9075aa116b0da1"}, + {file = "fastavro-1.8.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b30bbe06289310ff60c32c0ab01c394354f5bcae421842f06915ee7e401232ee"}, + {file = "fastavro-1.8.3-cp310-cp310-win_amd64.whl", hash = "sha256:c38ba23be298b1df63eaadf0663e8c1dc3fe355608ba3ce769554f61cc20f2d8"}, + {file = "fastavro-1.8.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4e5d8ad6dcf7cc4e15fc5f30344e4fcb1bac5c0c1b48ae88a46ceef470c04b0c"}, + {file = "fastavro-1.8.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bf6f1debda2bf84f57bdeee289e38e1ea8b23722792b7bdec8be6b3bf4dac67"}, + {file = "fastavro-1.8.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb2702976cf9bf4e1c66bae3534f498a93272eaa4cf2ba24fe18aa29c5fab647"}, + {file = "fastavro-1.8.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dfc73765b294ef56f71c1bb064ee81efa1da13bb0b1134dd53674bbb89477c78"}, + {file = "fastavro-1.8.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fa282c78485be06df1e25f0c9b6837de520a22838e7c9af95b58fc68c6c9ce34"}, + {file = "fastavro-1.8.3-cp311-cp311-win_amd64.whl", hash = "sha256:65e59420ce7a8cbb256363b9bc2b98fcd0c220723ec50541aa0aaf137dfa21fb"}, + 
{file = "fastavro-1.8.3-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:4416fdf69c82364d737e77c2a6ab06eeb20375d84813c061789e20bc047132a5"}, + {file = "fastavro-1.8.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f00ead48738e882832cc7ad87060365eb3eeace196ff9a5905a4caf0bab351"}, + {file = "fastavro-1.8.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ce57e57ec56a235ab012fde3ce7eaa3846980a9026448fcb32cb065f2460514"}, + {file = "fastavro-1.8.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b74fbcf860084576bd773169a18ddd140d06f9e85bb622756f557023947f179f"}, + {file = "fastavro-1.8.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b9164d7cb1541d15587c3a446f17719bc1f20008a1df1583e55d8b5a323266b5"}, + {file = "fastavro-1.8.3-cp38-cp38-win_amd64.whl", hash = "sha256:7ca4e19db2ded435dd393f58f65297102e7329ca8ba31d03be9c480b34be9123"}, + {file = "fastavro-1.8.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:821b1de9785e266142a8658e25df52bceaa40936c087925688a4fad4dee0beb0"}, + {file = "fastavro-1.8.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0116bf82a10eb3553d61d6d884f18c8b21049463fdefaaea9275d8bad64a0f5b"}, + {file = "fastavro-1.8.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6035368cbcbbb1063c2d1ce763ed5a602f4b6af13b325e77a6b61e45f8172067"}, + {file = "fastavro-1.8.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c0c194340ad5c6a5a5347ba0170d1413c149cd87faddcc519d9fcdedadaa1619"}, + {file = "fastavro-1.8.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7d2c093be35bcf77547d5cdeeefae2c18b88ae529fa3866da81f5c7c342fceb3"}, + {file = "fastavro-1.8.3-cp39-cp39-win_amd64.whl", hash = "sha256:861efe9ad25fc26c3d360761d48930e6aad0cabe5ae888f92a721699bfe612ed"}, + {file = "fastavro-1.8.3.tar.gz", hash = "sha256:a6c2ec69516e908fce64d93a13e6e83afb880f2edb5ad3adaa1eb04c918de6d8"}, +] + +[package.extras] +codecs = ["lz4", "python-snappy", "zstandard"] +lz4 = ["lz4"] +snappy = ["python-snappy"] +zstandard = ["zstandard"] + +[[package]] +name = "filelock" +version = "3.12.4" +description = "A platform independent file lock." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.12.4-py3-none-any.whl", hash = "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4"}, + {file = "filelock-3.12.4.tar.gz", hash = "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd"}, +] + +[package.extras] +docs = ["furo (>=2023.7.26)", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (>=1.24)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3)", "diff-cover (>=7.7)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest-timeout (>=2.1)"] +typing = ["typing-extensions (>=4.7.1)"] + +[[package]] +name = "frozenlist" +version = "1.4.0" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = true +python-versions = ">=3.8" +files = [ + {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"}, + {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"}, + {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"}, + {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"}, + {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"}, + {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"}, + {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"}, + {file = 
"frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"}, + {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"}, + {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"}, + {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"}, + {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"}, + {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"}, + {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"}, + {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"}, + {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"}, + {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"}, + {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"}, + {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"}, + {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"}, + {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"}, +] + +[[package]] +name = "fsspec" +version = "2023.9.1" +description = "File-system specification" +optional = false 
+python-versions = ">=3.8" +files = [ + {file = "fsspec-2023.9.1-py3-none-any.whl", hash = "sha256:99a974063b6cced36cfaa61aa8efb05439c6fea2dafe65930e7ab46f9d2f8930"}, + {file = "fsspec-2023.9.1.tar.gz", hash = "sha256:da8cfe39eeb65aaa69074d5e0e4bbc9b7ef72d69c0587a31cab981eefdb3da13"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +devel = ["pytest", "pytest-cov"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +tqdm = ["tqdm"] + +[[package]] +name = "gcsfs" +version = "2023.9.1" +description = "Convenient Filesystem interface over GCS" +optional = true +python-versions = ">=3.8" +files = [ + {file = "gcsfs-2023.9.1-py2.py3-none-any.whl", hash = "sha256:c673caf901fc923d121399782394aba9c99dac4332a15ba21bd320d0b7f46521"}, + {file = "gcsfs-2023.9.1.tar.gz", hash = "sha256:47698bba0468896bfb33749552fe459fa745069119d7c2605a53bf4f3a1f09ac"}, +] + +[package.dependencies] +aiohttp = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1" +decorator = ">4.1.2" +fsspec = "2023.9.1" +google-auth = ">=1.2" +google-auth-oauthlib = "*" +google-cloud-storage = "*" +requests = "*" + +[package.extras] +crc = ["crcmod"] +gcsfuse = ["fusepy"] + +[[package]] +name = "google-api-core" +version = "2.11.1" +description = "Google API client core library" +optional = true +python-versions = ">=3.7" +files = [ + {file = "google-api-core-2.11.1.tar.gz", hash = "sha256:25d29e05a0058ed5f19c61c0a78b1b53adea4d9364b464d014fbda941f6d1c9a"}, + {file = "google_api_core-2.11.1-py3-none-any.whl", hash = "sha256:d92a5a92dc36dd4f4b9ee4e55528a90e432b059f93aee6ad857f9de8cc7ae94a"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + +[[package]] +name = "google-auth" +version = "2.23.0" +description = "Google Authentication Library" +optional = true +python-versions = ">=3.7" +files = [ + {file = "google-auth-2.23.0.tar.gz", hash = "sha256:753a26312e6f1eaeec20bc6f2644a10926697da93446e1f8e24d6d32d45a922a"}, + {file = "google_auth-2.23.0-py2.py3-none-any.whl", hash = "sha256:2cec41407bd1e207f5b802638e32bb837df968bb5c05f413d0fa526fac4cf7a7"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" +urllib3 = "<2.0" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl 
(>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "google-auth-oauthlib" +version = "1.1.0" +description = "Google Authentication Library" +optional = true +python-versions = ">=3.6" +files = [ + {file = "google-auth-oauthlib-1.1.0.tar.gz", hash = "sha256:83ea8c3b0881e453790baff4448e8a6112ac8778d1de9da0b68010b843937afb"}, + {file = "google_auth_oauthlib-1.1.0-py2.py3-none-any.whl", hash = "sha256:089c6e587d36f4803ac7e0720c045c6a8b1fd1790088b8424975b90d0ee61c12"}, +] + +[package.dependencies] +google-auth = ">=2.15.0" +requests-oauthlib = ">=0.7.0" + +[package.extras] +tool = ["click (>=6.0.0)"] + +[[package]] +name = "google-cloud-core" +version = "2.3.3" +description = "Google Cloud API client core library" +optional = true +python-versions = ">=3.7" +files = [ + {file = "google-cloud-core-2.3.3.tar.gz", hash = "sha256:37b80273c8d7eee1ae816b3a20ae43585ea50506cb0e60f3cf5be5f87f1373cb"}, + {file = "google_cloud_core-2.3.3-py2.py3-none-any.whl", hash = "sha256:fbd11cad3e98a7e5b0343dc07cb1039a5ffd7a5bb96e1f1e27cee4bda4a90863"}, +] + +[package.dependencies] +google-api-core = ">=1.31.6,<2.0.dev0 || >2.3.0,<3.0.0dev" +google-auth = ">=1.25.0,<3.0dev" + +[package.extras] +grpc = ["grpcio (>=1.38.0,<2.0dev)"] + +[[package]] +name = "google-cloud-storage" +version = "2.11.0" +description = "Google Cloud Storage API client library" +optional = true +python-versions = ">=3.7" +files = [ + {file = "google-cloud-storage-2.11.0.tar.gz", hash = "sha256:6fbf62659b83c8f3a0a743af0d661d2046c97c3a5bfb587c4662c4bc68de3e31"}, + {file = "google_cloud_storage-2.11.0-py2.py3-none-any.whl", hash = "sha256:88cbd7fb3d701c780c4272bc26952db99f25eb283fb4c2208423249f00b5fe53"}, +] + +[package.dependencies] +google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0dev" +google-auth = ">=1.25.0,<3.0dev" +google-cloud-core = ">=2.3.0,<3.0dev" +google-resumable-media = ">=2.6.0" +requests = ">=2.18.0,<3.0.0dev" + +[package.extras] +protobuf = ["protobuf (<5.0.0dev)"] + +[[package]] +name = "google-crc32c" +version = "1.5.0" +description = "A python wrapper of the C library 'Google CRC32C'" +optional = true +python-versions = ">=3.7" +files = [ + {file = "google-crc32c-1.5.0.tar.gz", hash = "sha256:89284716bc6a5a415d4eaa11b1726d2d60a0cd12aadf5439828353662ede9dd7"}, + {file = "google_crc32c-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:596d1f98fc70232fcb6590c439f43b350cb762fb5d61ce7b0e9db4539654cc13"}, + {file = "google_crc32c-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:be82c3c8cfb15b30f36768797a640e800513793d6ae1724aaaafe5bf86f8f346"}, + {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:461665ff58895f508e2866824a47bdee72497b091c730071f2b7575d5762ab65"}, + {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2096eddb4e7c7bdae4bd69ad364e55e07b8316653234a56552d9c988bd2d61b"}, + {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:116a7c3c616dd14a3de8c64a965828b197e5f2d121fedd2f8c5585c547e87b02"}, + {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5829b792bf5822fd0a6f6eb34c5f81dd074f01d570ed7f36aa101d6fc7a0a6e4"}, + {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:64e52e2b3970bd891309c113b54cf0e4384762c934d5ae56e283f9a0afcd953e"}, + {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:02ebb8bf46c13e36998aeaad1de9b48f4caf545e91d14041270d9dca767b780c"}, + {file = "google_crc32c-1.5.0-cp310-cp310-win32.whl", hash = "sha256:2e920d506ec85eb4ba50cd4228c2bec05642894d4c73c59b3a2fe20346bd00ee"}, + {file = "google_crc32c-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:07eb3c611ce363c51a933bf6bd7f8e3878a51d124acfc89452a75120bc436289"}, + {file = "google_crc32c-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cae0274952c079886567f3f4f685bcaf5708f0a23a5f5216fdab71f81a6c0273"}, + {file = "google_crc32c-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1034d91442ead5a95b5aaef90dbfaca8633b0247d1e41621d1e9f9db88c36298"}, + {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c42c70cd1d362284289c6273adda4c6af8039a8ae12dc451dcd61cdabb8ab57"}, + {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8485b340a6a9e76c62a7dce3c98e5f102c9219f4cfbf896a00cf48caf078d438"}, + {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77e2fd3057c9d78e225fa0a2160f96b64a824de17840351b26825b0848022906"}, + {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183"}, + {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:a1fd716e7a01f8e717490fbe2e431d2905ab8aa598b9b12f8d10abebb36b04dd"}, + {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:72218785ce41b9cfd2fc1d6a017dc1ff7acfc4c17d01053265c41a2c0cc39b8c"}, + {file = "google_crc32c-1.5.0-cp311-cp311-win32.whl", hash = "sha256:66741ef4ee08ea0b2cc3c86916ab66b6aef03768525627fd6a1b34968b4e3709"}, + {file = "google_crc32c-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba1eb1843304b1e5537e1fca632fa894d6f6deca8d6389636ee5b4797affb968"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:98cb4d057f285bd80d8778ebc4fde6b4d509ac3f331758fb1528b733215443ae"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19e0a019d2c4dcc5e598cd4a4bc7b008546b0358bd322537c74ad47a5386884f"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02c65b9817512edc6a4ae7c7e987fea799d2e0ee40c53ec573a692bee24de876"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6ac08d24c1f16bd2bf5eca8eaf8304812f44af5cfe5062006ec676e7e1d50afc"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3359fc442a743e870f4588fcf5dcbc1bf929df1fad8fb9905cd94e5edb02e84c"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e986b206dae4476f41bcec1faa057851f3889503a70e1bdb2378d406223994a"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:de06adc872bcd8c2a4e0dc51250e9e65ef2ca91be023b9d13ebd67c2ba552e1e"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-win32.whl", hash = "sha256:d3515f198eaa2f0ed49f8819d5732d70698c3fa37384146079b3799b97667a94"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:67b741654b851abafb7bc625b6d1cdd520a379074e64b6a128e3b688c3c04740"}, + {file = 
"google_crc32c-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c02ec1c5856179f171e032a31d6f8bf84e5a75c45c33b2e20a3de353b266ebd8"}, + {file = "google_crc32c-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:edfedb64740750e1a3b16152620220f51d58ff1b4abceb339ca92e934775c27a"}, + {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84e6e8cd997930fc66d5bb4fde61e2b62ba19d62b7abd7a69920406f9ecca946"}, + {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a"}, + {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:998679bf62b7fb599d2878aa3ed06b9ce688b8974893e7223c60db155f26bd8d"}, + {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:83c681c526a3439b5cf94f7420471705bbf96262f49a6fe546a6db5f687a3d4a"}, + {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4c6fdd4fccbec90cc8a01fc00773fcd5fa28db683c116ee3cb35cd5da9ef6c37"}, + {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5ae44e10a8e3407dbe138984f21e536583f2bba1be9491239f942c2464ac0894"}, + {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:37933ec6e693e51a5b07505bd05de57eee12f3e8c32b07da7e73669398e6630a"}, + {file = "google_crc32c-1.5.0-cp38-cp38-win32.whl", hash = "sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4"}, + {file = "google_crc32c-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:74dea7751d98034887dbd821b7aae3e1d36eda111d6ca36c206c44478035709c"}, + {file = "google_crc32c-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c6c777a480337ac14f38564ac88ae82d4cd238bf293f0a22295b66eb89ffced7"}, + {file = "google_crc32c-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:759ce4851a4bb15ecabae28f4d2e18983c244eddd767f560165563bf9aefbc8d"}, + {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f13cae8cc389a440def0c8c52057f37359014ccbc9dc1f0827936bcd367c6100"}, + {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e560628513ed34759456a416bf86b54b2476c59144a9138165c9a1575801d0d9"}, + {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1674e4307fa3024fc897ca774e9c7562c957af85df55efe2988ed9056dc4e57"}, + {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:278d2ed7c16cfc075c91378c4f47924c0625f5fc84b2d50d921b18b7975bd210"}, + {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d5280312b9af0976231f9e317c20e4a61cd2f9629b7bfea6a693d1878a264ebd"}, + {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8b87e1a59c38f275c0e3676fc2ab6d59eccecfd460be267ac360cc31f7bcde96"}, + {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7c074fece789b5034b9b1404a1f8208fc2d4c6ce9decdd16e8220c5a793e6f61"}, + {file = "google_crc32c-1.5.0-cp39-cp39-win32.whl", hash = "sha256:7f57f14606cd1dd0f0de396e1e53824c371e9544a822648cd76c034d209b559c"}, + {file = "google_crc32c-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:a2355cba1f4ad8b6988a4ca3feed5bff33f6af2d7f134852cf279c2aebfde541"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:f314013e7dcd5cf45ab1945d92e713eec788166262ae8deb2cfacd53def27325"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b747a674c20a67343cb61d43fdd9207ce5da6a99f629c6e2541aa0e89215bcd"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f24ed114432de109aa9fd317278518a5af2d31ac2ea6b952b2f7782b43da091"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8667b48e7a7ef66afba2c81e1094ef526388d35b873966d8a9a447974ed9178"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:1c7abdac90433b09bad6c43a43af253e688c9cfc1c86d332aed13f9a7c7f65e2"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6f998db4e71b645350b9ac28a2167e6632c239963ca9da411523bb439c5c514d"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c99616c853bb585301df6de07ca2cadad344fd1ada6d62bb30aec05219c45d2"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ad40e31093a4af319dadf503b2467ccdc8f67c72e4bcba97f8c10cb078207b5"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd67cf24a553339d5062eff51013780a00d6f97a39ca062781d06b3a73b15462"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:398af5e3ba9cf768787eef45c803ff9614cc3e22a5b2f7d7ae116df8b11e3314"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b1f8133c9a275df5613a451e73f36c2aea4fe13c5c8997e22cf355ebd7bd0728"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ba053c5f50430a3fcfd36f75aff9caeba0440b2d076afdb79a318d6ca245f88"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:272d3892a1e1a2dbc39cc5cde96834c236d5327e2122d3aaa19f6614531bb6eb"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:635f5d4dd18758a1fbd1049a8e8d2fee4ffed124462d837d1a02a0e009c3ab31"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c672d99a345849301784604bfeaeba4db0c7aae50b95be04dd651fd2a7310b93"}, +] + +[package.extras] +testing = ["pytest"] + +[[package]] +name = "google-resumable-media" +version = "2.6.0" +description = "Utilities for Google Media Downloads and Resumable Uploads" +optional = true +python-versions = ">= 3.7" +files = [ + {file = "google-resumable-media-2.6.0.tar.gz", hash = "sha256:972852f6c65f933e15a4a210c2b96930763b47197cdf4aa5f5bea435efb626e7"}, + {file = "google_resumable_media-2.6.0-py2.py3-none-any.whl", hash = "sha256:fc03d344381970f79eebb632a3c18bb1828593a2dc5572b5f90115ef7d11e81b"}, +] + +[package.dependencies] +google-crc32c = ">=1.0,<2.0dev" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)", "google-auth (>=1.22.0,<2.0dev)"] +requests = ["requests (>=2.18.0,<3.0.0dev)"] + +[[package]] +name = "googleapis-common-protos" +version = "1.60.0" +description = "Common protobufs used in Google APIs" +optional = true +python-versions = ">=3.7" +files = [ + {file = "googleapis-common-protos-1.60.0.tar.gz", hash = "sha256:e73ebb404098db405ba95d1e1ae0aa91c3e15a71da031a2eeb6b2e23e7bc3708"}, + {file = "googleapis_common_protos-1.60.0-py2.py3-none-any.whl", hash 
= "sha256:69f9bbcc6acde92cab2db95ce30a70bd2b81d20b12eff3f1aabaffcbe8a93918"}, +] + +[package.dependencies] +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] + +[[package]] +name = "greenlet" +version = "2.0.2" +description = "Lightweight in-process concurrent programming" +optional = true +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" +files = [ + {file = "greenlet-2.0.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:bdfea8c661e80d3c1c99ad7c3ff74e6e87184895bbaca6ee8cc61209f8b9b85d"}, + {file = "greenlet-2.0.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:9d14b83fab60d5e8abe587d51c75b252bcc21683f24699ada8fb275d7712f5a9"}, + {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, + {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, + {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d967650d3f56af314b72df7089d96cda1083a7fc2da05b375d2bc48c82ab3f3c"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, + {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d4606a527e30548153be1a9f155f4e283d109ffba663a15856089fb55f933e47"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"}, + {file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"}, + {file = "greenlet-2.0.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:910841381caba4f744a44bf81bfd573c94e10b3045ee00de0cbf436fe50673a6"}, + {file = "greenlet-2.0.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:18a7f18b82b52ee85322d7a7874e676f34ab319b9f8cce5de06067384aa8ff43"}, + {file = "greenlet-2.0.2-cp35-cp35m-win32.whl", hash = "sha256:03a8f4f3430c3b3ff8d10a2a86028c660355ab637cee9333d63d66b56f09d52a"}, + {file = "greenlet-2.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:4b58adb399c4d61d912c4c331984d60eb66565175cdf4a34792cd9600f21b394"}, + {file = "greenlet-2.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:703f18f3fda276b9a916f0934d2fb6d989bf0b4fb5a64825260eb9bfd52d78f0"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:32e5b64b148966d9cccc2c8d35a671409e45f195864560829f395a54226408d3"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dd11f291565a81d71dab10b7033395b7a3a5456e637cf997a6f33ebdf06f8db"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0f72c9ddb8cd28532185f54cc1453f2c16fb417a08b53a855c4e6a418edd099"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd021c754b162c0fb55ad5d6b9d960db667faad0fa2ff25bb6e1301b0b6e6a75"}, + {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:3c9b12575734155d0c09d6c3e10dbd81665d5c18e1a7c6597df72fd05990c8cf"}, + {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b9ec052b06a0524f0e35bd8790686a1da006bd911dd1ef7d50b77bfbad74e292"}, + {file = "greenlet-2.0.2-cp36-cp36m-win32.whl", hash = "sha256:dbfcfc0218093a19c252ca8eb9aee3d29cfdcb586df21049b9d777fd32c14fd9"}, + {file = "greenlet-2.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:9f35ec95538f50292f6d8f2c9c9f8a3c6540bbfec21c9e5b4b751e0a7c20864f"}, + {file = "greenlet-2.0.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:d5508f0b173e6aa47273bdc0a0b5ba055b59662ba7c7ee5119528f466585526b"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:f82d4d717d8ef19188687aa32b8363e96062911e63ba22a0cff7802a8e58e5f1"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9c59a2120b55788e800d82dfa99b9e156ff8f2227f07c5e3012a45a399620b7"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2780572ec463d44c1d3ae850239508dbeb9fed38e294c68d19a24d925d9223ca"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937e9020b514ceedb9c830c55d5c9872abc90f4b5862f89c0887033ae33c6f73"}, + {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:36abbf031e1c0f79dd5d596bfaf8e921c41df2bdf54ee1eed921ce1f52999a86"}, + {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:18e98fb3de7dba1c0a852731c3070cf022d14f0d68b4c87a19cc1016f3bb8b33"}, + {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, + {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, + {file = 
"greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, + {file = "greenlet-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1087300cf9700bbf455b1b97e24db18f2f77b55302a68272c56209d5587c12d1"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acd2162a36d3de67ee896c43effcd5ee3de247eb00354db411feb025aa319857"}, + {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0bf60faf0bc2468089bdc5edd10555bab6e85152191df713e2ab1fcc86382b5a"}, + {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, + {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, + {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8512a0c38cfd4e66a858ddd1b17705587900dd760c6003998e9472b77b56d417"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be4ed120b52ae4d974aa40215fcdfde9194d63541c7ded40ee12eb4dda57b76b"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c817e84245513926588caf1152e3b559ff794d505555211ca041f032abbb6b"}, + {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1a819eef4b0e0b96bb0d98d797bef17dc1b4a10e8d7446be32d1da33e095dbb8"}, + {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7efde645ca1cc441d6dc4b48c0f7101e8d86b54c8530141b09fd31cef5149ec9"}, + {file = "greenlet-2.0.2-cp39-cp39-win32.whl", hash = "sha256:ea9872c80c132f4663822dd2a08d404073a5a9b5ba6155bea72fb2a79d1093b5"}, + {file = "greenlet-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:db1a39669102a1d8d12b57de2bb7e2ec9066a6f2b3da35ae511ff93b01b5d564"}, + {file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"}, +] + +[package.extras] +docs = ["Sphinx", "docutils (<0.18)"] +test = ["objgraph", "psutil"] + +[[package]] +name = "identify" +version = "2.5.29" +description = "File identification library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "identify-2.5.29-py2.py3-none-any.whl", hash = "sha256:24437fbf6f4d3fe6efd0eb9d67e24dd9106db99af5ceb27996a5f7895f24bf1b"}, + {file = "identify-2.5.29.tar.gz", hash = 
"sha256:d43d52b86b15918c137e3a74fff5224f60385cd0e9c38e99d07c257f02f151a5"}, +] + +[package.extras] +license = ["ukkonen"] + +[[package]] +name = "idna" +version = "3.4" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "importlib-metadata" +version = "6.8.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_metadata-6.8.0-py3-none-any.whl", hash = "sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb"}, + {file = "importlib_metadata-6.8.0.tar.gz", hash = "sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] + +[[package]] +name = "importlib-resources" +version = "6.1.0" +description = "Read resources from Python packages" +optional = true +python-versions = ">=3.8" +files = [ + {file = "importlib_resources-6.1.0-py3-none-any.whl", hash = "sha256:aa50258bbfa56d4e33fbd8aa3ef48ded10d1735f11532b8df95388cc6bdb7e83"}, + {file = "importlib_resources-6.1.0.tar.gz", hash = "sha256:9d48dcccc213325e810fd723e7fbb45ccb39f6cf5c31f00cf2b965f5f10f3cb9"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff", "zipp (>=3.17)"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "isodate" +version = "0.6.1" +description = "An ISO 8601 date/time/duration parser and formatter" +optional = true +python-versions = "*" +files = [ + {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, + {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, +] + +[package.dependencies] +six = "*" + +[[package]] +name = "jinja2" +version = "3.1.2" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "jsonschema" +version = "4.19.1" +description = "An implementation of JSON Schema validation for Python" +optional = true +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.19.1-py3-none-any.whl", hash = "sha256:cd5f1f9ed9444e554b38ba003af06c0a8c2868131e56bfbef0550fb450c0330e"}, + {file = "jsonschema-4.19.1.tar.gz", hash = "sha256:ec84cc37cfa703ef7cd4928db24f9cb31428a5d0fa77747b8b51a847458e0bbf"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +jsonschema-specifications = ">=2023.03.6" +pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.7.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = true +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.7.1-py3-none-any.whl", hash = "sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1"}, + {file = "jsonschema_specifications-2023.7.1.tar.gz", hash = "sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb"}, +] + +[package.dependencies] +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +referencing = ">=0.28.0" + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "markupsafe" +version = "2.1.3" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, + {file = 
"MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = 
"sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, + {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false 
+python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "mmhash3" +version = "3.0.1" +description = "Python wrapper for MurmurHash (MurmurHash3), a set of fast and robust hash functions." +optional = false +python-versions = "*" +files = [ + {file = "mmhash3-3.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:47deea30cd8d3d5cd52dc740902a4c70383bfe8248eac29d0877fe63e03c2713"}, + {file = "mmhash3-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ecdaf4d1de617818bf05cd526ca558db6010beeba7ea9e19f695f2bdcac0e0a4"}, + {file = "mmhash3-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4675585617584e9e1aafa3a90ac0ac9a437257c507748d97de8b21977e9d6745"}, + {file = "mmhash3-3.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebfd0c2af09b41f0fe1dd260799bda90a0fc7eba4477ccaeb3951527700cd58f"}, + {file = "mmhash3-3.0.1-cp310-cp310-win32.whl", hash = "sha256:68587dec7b8acdb7528fd511e295d8b5ccfe26022923a69867e1822f0fdb4c44"}, + {file = "mmhash3-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:54954ebe3a614f19ab4cfe24fa66ea093c493d9fac0533d002dd64c2540a0c99"}, + {file = "mmhash3-3.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b172f3bd3220b0bf56fd7cc760fd8a9033def47103611d63fe867003079a1256"}, + {file = "mmhash3-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:de7895eafabc32f7c0c09a81a624921f536768de6e438e3de69e3e954a4d7072"}, + {file = "mmhash3-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4b0914effe4ddd8d33149e3508564c17719465b0cc81691c4fa50d5e0e14f80"}, + {file = "mmhash3-3.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0575050ac691475938df1ad03d8738c5bd1eadef62093e76157ebb7f2df0946"}, + {file = "mmhash3-3.0.1-cp311-cp311-win32.whl", hash = "sha256:22f92f0f88f28b215357acd346362fa9f7c9fffb436beb42cc4b442b676dbaa3"}, + {file = "mmhash3-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:538240ab7936bf71b18304e5a7e7fd3c4c2fab103330ea99584bb4f777299a2b"}, + {file = "mmhash3-3.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ca791bfb311e36ce13998e4632262ed4b95da9d3461941e18b6690760171a045"}, + {file = "mmhash3-3.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b41708f72c6aa2a49ada1f0b61e85c05cd8389ad31d463fd5bca29999a4d5f9c"}, + {file = "mmhash3-3.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3ce9b4533ddc0a88ba045a27309714c3b127bd05e57fd252d1d5a71d4247ea7"}, + {file = "mmhash3-3.0.1-cp36-cp36m-win32.whl", hash = "sha256:bfafeb96fdeb10db8767d06e1f07b6fdcddba4aaa0dd15058561a49f7ae45345"}, + {file = "mmhash3-3.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:97fe077b24c948709ed2afc749bf6285d407bc54ff12c63d2dc86678c38a0b8e"}, + {file = "mmhash3-3.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0cfd91ccd5fca1ba7ee925297131a15dfb94c714bfe6ba0fb3b1ca78b12bbfec"}, + {file = "mmhash3-3.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d51b1005233141ce7394531af40a3f0fc1f274467bf8dff44dcf7987924fe58"}, + {file = 
"mmhash3-3.0.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855c67b100e37df166acf79cdff58fa8f9f6c48be0d1e1b6e9ad0fa34a9661ef"}, + {file = "mmhash3-3.0.1-cp37-cp37m-win32.whl", hash = "sha256:bb3030df1334fd665427f8be8e8ce4f04aeab7f6010ce4f2c128f0099bdab96f"}, + {file = "mmhash3-3.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:1545e1177294afe4912d5a5e401c7fa9b799dd109b30289e7af74d5b07e7c474"}, + {file = "mmhash3-3.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2479899e7dda834a671991a1098a691ab1c2eaa20c3e939d691ca4a19361cfe0"}, + {file = "mmhash3-3.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9056196d5e3d3d844433a63d806a683f710ab3aaf1c910550c7746464bc43ae"}, + {file = "mmhash3-3.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d4c307af0bf70207305f70f131898be071d1b19a89f462b13487f5c25e8d4e"}, + {file = "mmhash3-3.0.1-cp38-cp38-win32.whl", hash = "sha256:5f885f65e329fd14bc38debac4a79eacf381e856965d9c65c4d1c6946ea190d0"}, + {file = "mmhash3-3.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:3b42d0bda5e1cd22c18b16887b0521060fb59d0aaaaf033feacbc0a2492d20fe"}, + {file = "mmhash3-3.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d3f333286bb87aa9dc6bd8e7960a55a27b011a401f24b889a50e6d219f65e7c9"}, + {file = "mmhash3-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6b7ef2eb95a18bcd02ce0d3e047adde3a025afd96c1d266a8a0d44574f44a307"}, + {file = "mmhash3-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6ac8a5f511c60f341bf9cae462bb4941abb149d98464ba5f4f4548875b601c6"}, + {file = "mmhash3-3.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efef9e632e6e248e46f52d108a5ebd1f28edaf427b7fd47ebf97dbff4b2cab81"}, + {file = "mmhash3-3.0.1-cp39-cp39-win32.whl", hash = "sha256:bdac06d72e448c67afb12e758b203d875e29d4097bb279a38a5649d44b518ba7"}, + {file = "mmhash3-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:0baeaa20cac5f75ed14f28056826bd9d9c8b2354b382073f3fd5190708992a0d"}, + {file = "mmhash3-3.0.1.tar.gz", hash = "sha256:a00d68f4a1cc434b9501513c8a29e18ed1ddad383677d72b41d71d0d862348af"}, +] + +[[package]] +name = "moto" +version = "4.2.4" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "moto-4.2.4-py2.py3-none-any.whl", hash = "sha256:3516f55405015e4516c549d875c7a93e7daa1622d6342af335e63cf7bfe442fd"}, + {file = "moto-4.2.4.tar.gz", hash = "sha256:eea3c5b29987e8b12816b355dfdcca5d7a815a9d9f17208af31fa32acbe8b389"}, +] + +[package.dependencies] +boto3 = ">=1.9.201" +botocore = ">=1.12.201" +cryptography = ">=3.3.1" +Jinja2 = ">=2.10.1" +python-dateutil = ">=2.1,<3.0.0" +requests = ">=2.5" +responses = ">=0.13.0" +werkzeug = ">=0.5,<2.2.0 || >2.2.0,<2.2.1 || >2.2.1" +xmltodict = "*" + +[package.extras] +all = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "ecdsa (!=0.15)", "graphql-core", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.2.8)", "py-partiql-parser (==0.3.7)", "pyparsing (>=3.0.7)", "python-jose[cryptography] (>=3.1.0,<4.0.0)", "setuptools", "sshpubkeys (>=3.1.0)"] +apigateway = ["PyYAML (>=5.1)", "ecdsa (!=0.15)", "openapi-spec-validator (>=0.2.8)", "python-jose[cryptography] (>=3.1.0,<4.0.0)"] +apigatewayv2 = ["PyYAML (>=5.1)"] +appsync = ["graphql-core"] +awslambda = ["docker (>=3.0.0)"] +batch = ["docker (>=3.0.0)"] +cloudformation = 
["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "ecdsa (!=0.15)", "graphql-core", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.2.8)", "py-partiql-parser (==0.3.7)", "pyparsing (>=3.0.7)", "python-jose[cryptography] (>=3.1.0,<4.0.0)", "setuptools", "sshpubkeys (>=3.1.0)"] +cognitoidp = ["ecdsa (!=0.15)", "python-jose[cryptography] (>=3.1.0,<4.0.0)"] +ds = ["sshpubkeys (>=3.1.0)"] +dynamodb = ["docker (>=3.0.0)", "py-partiql-parser (==0.3.7)"] +dynamodbstreams = ["docker (>=3.0.0)", "py-partiql-parser (==0.3.7)"] +ebs = ["sshpubkeys (>=3.1.0)"] +ec2 = ["sshpubkeys (>=3.1.0)"] +efs = ["sshpubkeys (>=3.1.0)"] +eks = ["sshpubkeys (>=3.1.0)"] +glue = ["pyparsing (>=3.0.7)"] +iotdata = ["jsondiff (>=1.1.2)"] +resourcegroupstaggingapi = ["PyYAML (>=5.1)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "ecdsa (!=0.15)", "graphql-core", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.2.8)", "py-partiql-parser (==0.3.7)", "pyparsing (>=3.0.7)", "python-jose[cryptography] (>=3.1.0,<4.0.0)", "sshpubkeys (>=3.1.0)"] +route53resolver = ["sshpubkeys (>=3.1.0)"] +s3 = ["PyYAML (>=5.1)", "py-partiql-parser (==0.3.7)"] +s3crc32c = ["PyYAML (>=5.1)", "crc32c", "py-partiql-parser (==0.3.7)"] +server = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "ecdsa (!=0.15)", "flask (!=2.2.0,!=2.2.1)", "flask-cors", "graphql-core", "jsondiff (>=1.1.2)", "openapi-spec-validator (>=0.2.8)", "py-partiql-parser (==0.3.7)", "pyparsing (>=3.0.7)", "python-jose[cryptography] (>=3.1.0,<4.0.0)", "setuptools", "sshpubkeys (>=3.1.0)"] +ssm = ["PyYAML (>=5.1)"] +xray = ["aws-xray-sdk (>=0.93,!=0.96)", "setuptools"] + +[[package]] +name = "msal" +version = "1.24.0" +description = "The Microsoft Authentication Library (MSAL) for Python library" +optional = true +python-versions = ">=2.7" +files = [ + {file = "msal-1.24.0-py2.py3-none-any.whl", hash = "sha256:a7f2f342b80ba3fe168218003b6798cc81b83c9745284bf63fb8d4ec8e2dbc50"}, + {file = "msal-1.24.0.tar.gz", hash = "sha256:7d2ecdad41a5f73bb2b813f3061a4cf47c924621105a8ed137586fcb9d8f827e"}, +] + +[package.dependencies] +cryptography = ">=0.6,<44" +PyJWT = {version = ">=1.0.0,<3", extras = ["crypto"]} +requests = ">=2.0.0,<3" + +[package.extras] +broker = ["pymsalruntime (>=0.13.2,<0.14)"] + +[[package]] +name = "msal-extensions" +version = "1.0.0" +description = "Microsoft Authentication Library extensions (MSAL EX) provides a persistence API that can save your data on disk, encrypted on Windows, macOS and Linux. Concurrent data access will be coordinated by a file lock mechanism." 
+optional = true +python-versions = "*" +files = [ + {file = "msal-extensions-1.0.0.tar.gz", hash = "sha256:c676aba56b0cce3783de1b5c5ecfe828db998167875126ca4b47dc6436451354"}, + {file = "msal_extensions-1.0.0-py2.py3-none-any.whl", hash = "sha256:91e3db9620b822d0ed2b4d1850056a0f133cba04455e62f11612e40f5502f2ee"}, +] + +[package.dependencies] +msal = ">=0.4.1,<2.0.0" +portalocker = [ + {version = ">=1.0,<3", markers = "python_version >= \"3.5\" and platform_system != \"Windows\""}, + {version = ">=1.6,<3", markers = "python_version >= \"3.5\" and platform_system == \"Windows\""}, +] + +[[package]] +name = "msgpack" +version = "1.0.6" +description = "MessagePack serializer" +optional = true +python-versions = ">=3.8" +files = [ + {file = "msgpack-1.0.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f4321692e7f299277e55f322329b2c972d93bb612d85f3fda8741bec5c6285ce"}, + {file = "msgpack-1.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1f0e36a5fa7a182cde391a128a64f437657d2b9371dfa42eda3436245adccbf5"}, + {file = "msgpack-1.0.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b5c8dd9a386a66e50bd7fa22b7a49fb8ead2b3574d6bd69eb1caced6caea0803"}, + {file = "msgpack-1.0.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f85200ea102276afdd3749ca94747f057bbb868d1c52921ee2446730b508d0f"}, + {file = "msgpack-1.0.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a006c300e82402c0c8f1ded11352a3ba2a61b87e7abb3054c845af2ca8d553c"}, + {file = "msgpack-1.0.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33bbf47ea5a6ff20c23426106e81863cdbb5402de1825493026ce615039cc99d"}, + {file = "msgpack-1.0.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:04450e4b5e1e662e7c86b6aafb7c230af9334fd0becf5e6b80459a507884241c"}, + {file = "msgpack-1.0.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b06a5095a79384760625b5de3f83f40b3053a385fb893be8a106fbbd84c14980"}, + {file = "msgpack-1.0.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3910211b0ab20be3a38e0bb944ed45bd4265d8d9f11a3d1674b95b298e08dd5c"}, + {file = "msgpack-1.0.6-cp310-cp310-win32.whl", hash = "sha256:1dc67b40fe81217b308ab12651adba05e7300b3a2ccf84d6b35a878e308dd8d4"}, + {file = "msgpack-1.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:885de1ed5ea01c1bfe0a34c901152a264c3c1f8f1d382042b92ea354bd14bb0e"}, + {file = "msgpack-1.0.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:099c3d8a027367e1a6fc55d15336f04ff65c60c4f737b5739f7db4525c65fe9e"}, + {file = "msgpack-1.0.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9b88dc97ba86c96b964c3745a445d9a65f76fe21955a953064fe04adb63e9367"}, + {file = "msgpack-1.0.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:00ce5f827d4f26fc094043e6f08b6069c1b148efa2631c47615ae14fb6cafc89"}, + {file = "msgpack-1.0.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd6af61388be65a8701f5787362cb54adae20007e0cc67ca9221a4b95115583b"}, + {file = "msgpack-1.0.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:652e4b7497825b0af6259e2c54700e6dc33d2fc4ed92b8839435090d4c9cc911"}, + {file = "msgpack-1.0.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b08676a17e3f791daad34d5fcb18479e9c85e7200d5a17cbe8de798643a7e37"}, + {file = "msgpack-1.0.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:229ccb6713c8b941eaa5cf13dc7478eba117f21513b5893c35e44483e2f0c9c8"}, + 
{file = "msgpack-1.0.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:95ade0bd4cf69e04e8b8f8ec2d197d9c9c4a9b6902e048dc7456bf6d82e12a80"}, + {file = "msgpack-1.0.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b16344032a27b2ccfd341f89dadf3e4ef6407d91e4b93563c14644a8abb3ad7"}, + {file = "msgpack-1.0.6-cp311-cp311-win32.whl", hash = "sha256:55bb4a1bf94e39447bc08238a2fb8a767460388a8192f67c103442eb36920887"}, + {file = "msgpack-1.0.6-cp311-cp311-win_amd64.whl", hash = "sha256:ae97504958d0bc58c1152045c170815d5c4f8af906561ce044b6358b43d0c97e"}, + {file = "msgpack-1.0.6-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7ecf431786019a7bfedc28281531d706627f603e3691d64eccdbce3ecd353823"}, + {file = "msgpack-1.0.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a635aecf1047255576dbb0927cbf9a7aa4a68e9d54110cc3c926652d18f144e0"}, + {file = "msgpack-1.0.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:102cfb54eaefa73e8ca1e784b9352c623524185c98e057e519545131a56fb0af"}, + {file = "msgpack-1.0.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c5e05e4f5756758c58a8088aa10dc70d851c89f842b611fdccfc0581c1846bc"}, + {file = "msgpack-1.0.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68569509dd015fcdd1e6b2b3ccc8c51fd27d9a97f461ccc909270e220ee09685"}, + {file = "msgpack-1.0.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf652839d16de91fe1cfb253e0a88db9a548796939533894e07f45d4bdf90a5f"}, + {file = "msgpack-1.0.6-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14db7e1b7a7ed362b2f94897bf2486c899c8bb50f6e34b2db92fe534cdab306f"}, + {file = "msgpack-1.0.6-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:159cfec18a6e125dd4723e2b1de6f202b34b87c850fb9d509acfd054c01135e9"}, + {file = "msgpack-1.0.6-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6a01a072b2219b65a6ff74df208f20b2cac9401c60adb676ee34e53b4c651077"}, + {file = "msgpack-1.0.6-cp312-cp312-win32.whl", hash = "sha256:e36560d001d4ba469d469b02037f2dd404421fd72277d9474efe9f03f83fced5"}, + {file = "msgpack-1.0.6-cp312-cp312-win_amd64.whl", hash = "sha256:5e7fae9ca93258a956551708cf60dc6c8145574e32ce8c8c4d894e63bcb04341"}, + {file = "msgpack-1.0.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:40b801b768f5a765e33c68f30665d3c6ee1c8623a2d2bb78e6e59f2db4e4ceb7"}, + {file = "msgpack-1.0.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:da057d3652e698b00746e47f06dbb513314f847421e857e32e1dc61c46f6c052"}, + {file = "msgpack-1.0.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f75114c05ec56566da6b55122791cf5bb53d5aada96a98c016d6231e03132f76"}, + {file = "msgpack-1.0.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61213482b5a387ead9e250e9e3cb290292feca39dc83b41c3b1b7b8ffc8d8ecb"}, + {file = "msgpack-1.0.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae6c561f11b444b258b1b4be2bdd1e1cf93cd1d80766b7e869a79db4543a8a8"}, + {file = "msgpack-1.0.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:619a63753ba9e792fe3c6c0fc2b9ee2cfbd92153dd91bee029a89a71eb2942cd"}, + {file = "msgpack-1.0.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:70843788c85ca385846a2d2f836efebe7bb2687ca0734648bf5c9dc6c55602d2"}, + {file = "msgpack-1.0.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:fb4571efe86545b772a4630fee578c213c91cbcfd20347806e47fd4e782a18fe"}, + {file = 
"msgpack-1.0.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bbb4448a05d261fae423d5c0b0974ad899f60825bc77eabad5a0c518e78448c2"}, + {file = "msgpack-1.0.6-cp38-cp38-win32.whl", hash = "sha256:5cd67674db3c73026e0a2c729b909780e88bd9cbc8184256f9567640a5d299a8"}, + {file = "msgpack-1.0.6-cp38-cp38-win_amd64.whl", hash = "sha256:a1cf98afa7ad5e7012454ca3fde254499a13f9d92fd50cb46118118a249a1355"}, + {file = "msgpack-1.0.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d6d25b8a5c70e2334ed61a8da4c11cd9b97c6fbd980c406033f06e4463fda006"}, + {file = "msgpack-1.0.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:88cdb1da7fdb121dbb3116910722f5acab4d6e8bfcacab8fafe27e2e7744dc6a"}, + {file = "msgpack-1.0.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3b5658b1f9e486a2eec4c0c688f213a90085b9cf2fec76ef08f98fdf6c62f4b9"}, + {file = "msgpack-1.0.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76820f2ece3b0a7c948bbb6a599020e29574626d23a649476def023cbb026787"}, + {file = "msgpack-1.0.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c780d992f5d734432726b92a0c87bf1857c3d85082a8dea29cbf56e44a132b3"}, + {file = "msgpack-1.0.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0ed35d6d6122d0baa9a1b59ebca4ee302139f4cfb57dab85e4c73ab793ae7ed"}, + {file = "msgpack-1.0.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:32c0aff31f33033f4961abc01f78497e5e07bac02a508632aef394b384d27428"}, + {file = "msgpack-1.0.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:35ad5aed9b52217d4cea739d0ea3a492a18dd86fecb4b132668a69f27fb0363b"}, + {file = "msgpack-1.0.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47275ff73005a3e5e146e50baa2378e1730cba6e292f0222bc496a8e4c4adfc8"}, + {file = "msgpack-1.0.6-cp39-cp39-win32.whl", hash = "sha256:7baf16fd8908a025c4a8d7b699103e72d41f967e2aee5a2065432bcdbd9fd06e"}, + {file = "msgpack-1.0.6-cp39-cp39-win_amd64.whl", hash = "sha256:fc97aa4b4fb928ff4d3b74da7c30b360d0cb3ede49a5a6e1fd9705f49aea1deb"}, + {file = "msgpack-1.0.6.tar.gz", hash = "sha256:25d3746da40f3c8c59c3b1d001e49fd2aa17904438f980d9a391370366df001e"}, +] + +[[package]] +name = "multidict" +version = "6.0.4" +description = "multidict implementation" +optional = true +python-versions = ">=3.7" +files = [ + {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, + {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, + {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, + {file = 
"multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, + {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, + {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, + {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, + {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = 
"sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, + {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, + {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, + {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, + {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, + {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, + {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, + {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, + {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, +] + +[[package]] +name = "mypy-boto3-glue" +version = "1.28.36" +description = "Type annotations for boto3.Glue 1.28.36 service generated with mypy-boto3-builder 7.18.0" +optional = true 
+python-versions = ">=3.7" +files = [ + {file = "mypy-boto3-glue-1.28.36.tar.gz", hash = "sha256:161771252bb6a220a0bfd8e6ad71da8548599c611f95fe8a94846f4a3386d2ae"}, + {file = "mypy_boto3_glue-1.28.36-py3-none-any.whl", hash = "sha256:73bc14616ac65a5c02adea5efba7bbbcf8207cd0c0e3237c13d351ebc916338d"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} + +[[package]] +name = "nodeenv" +version = "1.8.0" +description = "Node.js virtual environment builder" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +files = [ + {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, + {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"}, +] + +[package.dependencies] +setuptools = "*" + +[[package]] +name = "numpy" +version = "1.24.4" +description = "Fundamental package for array computing in Python" +optional = true +python-versions = ">=3.8" +files = [ + {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, + {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, + {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, + {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, + {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, + {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = 
"sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, + {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, + {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, + {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, + {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, +] + +[[package]] +name = "numpy" +version = "1.25.2" +description = "Fundamental package for array computing in Python" +optional = true +python-versions = ">=3.9" +files = [ + {file = "numpy-1.25.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:db3ccc4e37a6873045580d413fe79b68e47a681af8db2e046f1dacfa11f86eb3"}, + {file = "numpy-1.25.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:90319e4f002795ccfc9050110bbbaa16c944b1c37c0baeea43c5fb881693ae1f"}, + {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfe4a913e29b418d096e696ddd422d8a5d13ffba4ea91f9f60440a3b759b0187"}, + {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f08f2e037bba04e707eebf4bc934f1972a315c883a9e0ebfa8a7756eabf9e357"}, + {file = "numpy-1.25.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bec1e7213c7cb00d67093247f8c4db156fd03075f49876957dca4711306d39c9"}, + {file = "numpy-1.25.2-cp310-cp310-win32.whl", hash = "sha256:7dc869c0c75988e1c693d0e2d5b26034644399dd929bc049db55395b1379e044"}, + {file = "numpy-1.25.2-cp310-cp310-win_amd64.whl", hash = "sha256:834b386f2b8210dca38c71a6e0f4fd6922f7d3fcff935dbe3a570945acb1b545"}, + {file = "numpy-1.25.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5462d19336db4560041517dbb7759c21d181a67cb01b36ca109b2ae37d32418"}, + {file = "numpy-1.25.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c5652ea24d33585ea39eb6a6a15dac87a1206a692719ff45d53c5282e66d4a8f"}, + {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d60fbae8e0019865fc4784745814cff1c421df5afee233db6d88ab4f14655a2"}, + {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e7f0f7f6d0eee8364b9a6304c2845b9c491ac706048c7e8cf47b83123b8dbf"}, + {file = 
"numpy-1.25.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bb33d5a1cf360304754913a350edda36d5b8c5331a8237268c48f91253c3a364"}, + {file = "numpy-1.25.2-cp311-cp311-win32.whl", hash = "sha256:5883c06bb92f2e6c8181df7b39971a5fb436288db58b5a1c3967702d4278691d"}, + {file = "numpy-1.25.2-cp311-cp311-win_amd64.whl", hash = "sha256:5c97325a0ba6f9d041feb9390924614b60b99209a71a69c876f71052521d42a4"}, + {file = "numpy-1.25.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b79e513d7aac42ae918db3ad1341a015488530d0bb2a6abcbdd10a3a829ccfd3"}, + {file = "numpy-1.25.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eb942bfb6f84df5ce05dbf4b46673ffed0d3da59f13635ea9b926af3deb76926"}, + {file = "numpy-1.25.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e0746410e73384e70d286f93abf2520035250aad8c5714240b0492a7302fdca"}, + {file = "numpy-1.25.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7806500e4f5bdd04095e849265e55de20d8cc4b661b038957354327f6d9b295"}, + {file = "numpy-1.25.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8b77775f4b7df768967a7c8b3567e309f617dd5e99aeb886fa14dc1a0791141f"}, + {file = "numpy-1.25.2-cp39-cp39-win32.whl", hash = "sha256:2792d23d62ec51e50ce4d4b7d73de8f67a2fd3ea710dcbc8563a51a03fb07b01"}, + {file = "numpy-1.25.2-cp39-cp39-win_amd64.whl", hash = "sha256:76b4115d42a7dfc5d485d358728cdd8719be33cc5ec6ec08632a5d6fca2ed380"}, + {file = "numpy-1.25.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1a1329e26f46230bf77b02cc19e900db9b52f398d6722ca853349a782d4cff55"}, + {file = "numpy-1.25.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3abc71e8b6edba80a01a52e66d83c5d14433cbcd26a40c329ec7ed09f37901"}, + {file = "numpy-1.25.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1b9735c27cea5d995496f46a8b1cd7b408b3f34b6d50459d9ac8fe3a20cc17bf"}, + {file = "numpy-1.25.2.tar.gz", hash = "sha256:fd608e19c8d7c55021dffd43bfe5492fab8cc105cc8986f813f8c3c048b38760"}, +] + +[[package]] +name = "oauthlib" +version = "3.2.2" +description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +optional = true +python-versions = ">=3.6" +files = [ + {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, + {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, +] + +[package.extras] +rsa = ["cryptography (>=3.0.0)"] +signals = ["blinker (>=1.4.0)"] +signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] + +[[package]] +name = "packaging" +version = "23.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, + {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, +] + +[[package]] +name = "pandas" +version = "2.0.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = true +python-versions = ">=3.8" +files = [ + {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, + {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, + {file = 
"pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"}, + {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"}, + {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"}, + {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = "sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"}, + {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"}, + {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"}, + {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"}, + {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"}, + {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, + {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, + {version = ">=1.21.0", 
markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.1" + +[package.extras] +all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] +aws = ["s3fs (>=2021.08.0)"] +clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] +compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] +computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] +feather = ["pyarrow (>=7.0.0)"] +fss = ["fsspec (>=2021.07.0)"] +gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] +hdf5 = ["tables (>=3.6.1)"] +html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] +mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] +parquet = ["pyarrow (>=7.0.0)"] +performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] +plot = ["matplotlib (>=3.6.1)"] +postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] +spss = ["pyreadstat (>=1.1.2)"] +sql-other = ["SQLAlchemy (>=1.4.16)"] +test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.6.3)"] + +[[package]] +name = "pkgutil-resolve-name" +version = "1.3.10" +description = "Resolve a name to an object." +optional = true +python-versions = ">=3.6" +files = [ + {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, + {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, +] + +[[package]] +name = "platformdirs" +version = "3.10.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "platformdirs-3.10.0-py3-none-any.whl", hash = "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d"}, + {file = "platformdirs-3.10.0.tar.gz", hash = "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d"}, +] + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] + +[[package]] +name = "pluggy" +version = "1.3.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, + {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "portalocker" +version = "2.8.2" +description = "Wraps the portalocker recipe for easy usage" +optional = true +python-versions = ">=3.8" +files = [ + {file = "portalocker-2.8.2-py3-none-any.whl", hash = "sha256:cfb86acc09b9aa7c3b43594e19be1345b9d16af3feb08bf92f23d4dce513a28e"}, + {file = "portalocker-2.8.2.tar.gz", hash = "sha256:2b035aa7828e46c58e9b31390ee1f169b98e1066ab10b9a6a861fe7e25ee4f33"}, +] + +[package.dependencies] +pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""} + +[package.extras] +docs = ["sphinx (>=1.7.1)"] +redis = ["redis"] +tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)", "types-redis"] + +[[package]] +name = "pre-commit" +version = "3.4.0" +description = "A framework for managing and maintaining multi-language pre-commit hooks." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pre_commit-3.4.0-py2.py3-none-any.whl", hash = "sha256:96d529a951f8b677f730a7212442027e8ba53f9b04d217c4c67dc56c393ad945"}, + {file = "pre_commit-3.4.0.tar.gz", hash = "sha256:6bbd5129a64cad4c0dfaeeb12cd8f7ea7e15b77028d985341478c8af3c759522"}, +] + +[package.dependencies] +cfgv = ">=2.0.0" +identify = ">=1.0.0" +nodeenv = ">=0.11.1" +pyyaml = ">=5.1" +virtualenv = ">=20.10.0" + +[[package]] +name = "protobuf" +version = "4.24.3" +description = "" +optional = true +python-versions = ">=3.7" +files = [ + {file = "protobuf-4.24.3-cp310-abi3-win32.whl", hash = "sha256:20651f11b6adc70c0f29efbe8f4a94a74caf61b6200472a9aea6e19898f9fcf4"}, + {file = "protobuf-4.24.3-cp310-abi3-win_amd64.whl", hash = "sha256:3d42e9e4796a811478c783ef63dc85b5a104b44aaaca85d4864d5b886e4b05e3"}, + {file = "protobuf-4.24.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:6e514e8af0045be2b56e56ae1bb14f43ce7ffa0f68b1c793670ccbe2c4fc7d2b"}, + {file = "protobuf-4.24.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:ba53c2f04798a326774f0e53b9c759eaef4f6a568ea7072ec6629851c8435959"}, + {file = "protobuf-4.24.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:f6ccbcf027761a2978c1406070c3788f6de4a4b2cc20800cc03d52df716ad675"}, + {file = "protobuf-4.24.3-cp37-cp37m-win32.whl", hash = "sha256:1b182c7181a2891e8f7f3a1b5242e4ec54d1f42582485a896e4de81aa17540c2"}, + {file = "protobuf-4.24.3-cp37-cp37m-win_amd64.whl", hash = "sha256:b0271a701e6782880d65a308ba42bc43874dabd1a0a0f41f72d2dac3b57f8e76"}, + {file = "protobuf-4.24.3-cp38-cp38-win32.whl", hash = "sha256:e29d79c913f17a60cf17c626f1041e5288e9885c8579832580209de8b75f2a52"}, + {file = "protobuf-4.24.3-cp38-cp38-win_amd64.whl", hash = "sha256:067f750169bc644da2e1ef18c785e85071b7c296f14ac53e0900e605da588719"}, + {file = "protobuf-4.24.3-cp39-cp39-win32.whl", hash = "sha256:2da777d34b4f4f7613cdf85c70eb9a90b1fbef9d36ae4a0ccfe014b0b07906f1"}, + {file = "protobuf-4.24.3-cp39-cp39-win_amd64.whl", hash = "sha256:f631bb982c5478e0c1c70eab383af74a84be66945ebf5dd6b06fc90079668d0b"}, + {file = "protobuf-4.24.3-py3-none-any.whl", hash = "sha256:f6f8dc65625dadaad0c8545319c2e2f0424fede988368893ca3844261342c11a"}, + {file = "protobuf-4.24.3.tar.gz", hash = "sha256:12e9ad2ec079b833176d2921be2cb24281fa591f0b119b208b788adc48c2561d"}, +] + +[[package]] +name = "psycopg2-binary" +version = "2.9.7" +description = "psycopg2 - Python-PostgreSQL Database Adapter" +optional = true +python-versions = ">=3.6" +files = [ + {file = "psycopg2-binary-2.9.7.tar.gz", hash = "sha256:1b918f64a51ffe19cd2e230b3240ba481330ce1d4b7875ae67305bd1d37b041c"}, + {file = "psycopg2_binary-2.9.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ea5f8ee87f1eddc818fc04649d952c526db4426d26bab16efbe5a0c52b27d6ab"}, + {file = "psycopg2_binary-2.9.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2993ccb2b7e80844d534e55e0f12534c2871952f78e0da33c35e648bf002bbff"}, + {file = "psycopg2_binary-2.9.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbbc3c5d15ed76b0d9db7753c0db40899136ecfe97d50cbde918f630c5eb857a"}, + {file = "psycopg2_binary-2.9.7-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:692df8763b71d42eb8343f54091368f6f6c9cfc56dc391858cdb3c3ef1e3e584"}, + {file = "psycopg2_binary-2.9.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9dcfd5d37e027ec393a303cc0a216be564b96c80ba532f3d1e0d2b5e5e4b1e6e"}, + {file = 
"psycopg2_binary-2.9.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17cc17a70dfb295a240db7f65b6d8153c3d81efb145d76da1e4a096e9c5c0e63"}, + {file = "psycopg2_binary-2.9.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e5666632ba2b0d9757b38fc17337d84bdf932d38563c5234f5f8c54fd01349c9"}, + {file = "psycopg2_binary-2.9.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7db7b9b701974c96a88997d458b38ccb110eba8f805d4b4f74944aac48639b42"}, + {file = "psycopg2_binary-2.9.7-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c82986635a16fb1fa15cd5436035c88bc65c3d5ced1cfaac7f357ee9e9deddd4"}, + {file = "psycopg2_binary-2.9.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4fe13712357d802080cfccbf8c6266a3121dc0e27e2144819029095ccf708372"}, + {file = "psycopg2_binary-2.9.7-cp310-cp310-win32.whl", hash = "sha256:122641b7fab18ef76b18860dd0c772290566b6fb30cc08e923ad73d17461dc63"}, + {file = "psycopg2_binary-2.9.7-cp310-cp310-win_amd64.whl", hash = "sha256:f8651cf1f144f9ee0fa7d1a1df61a9184ab72962531ca99f077bbdcba3947c58"}, + {file = "psycopg2_binary-2.9.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4ecc15666f16f97709106d87284c136cdc82647e1c3f8392a672616aed3c7151"}, + {file = "psycopg2_binary-2.9.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3fbb1184c7e9d28d67671992970718c05af5f77fc88e26fd7136613c4ece1f89"}, + {file = "psycopg2_binary-2.9.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a7968fd20bd550431837656872c19575b687f3f6f98120046228e451e4064df"}, + {file = "psycopg2_binary-2.9.7-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:094af2e77a1976efd4956a031028774b827029729725e136514aae3cdf49b87b"}, + {file = "psycopg2_binary-2.9.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26484e913d472ecb6b45937ea55ce29c57c662066d222fb0fbdc1fab457f18c5"}, + {file = "psycopg2_binary-2.9.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f309b77a7c716e6ed9891b9b42953c3ff7d533dc548c1e33fddc73d2f5e21f9"}, + {file = "psycopg2_binary-2.9.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6d92e139ca388ccfe8c04aacc163756e55ba4c623c6ba13d5d1595ed97523e4b"}, + {file = "psycopg2_binary-2.9.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:2df562bb2e4e00ee064779902d721223cfa9f8f58e7e52318c97d139cf7f012d"}, + {file = "psycopg2_binary-2.9.7-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:4eec5d36dbcfc076caab61a2114c12094c0b7027d57e9e4387b634e8ab36fd44"}, + {file = "psycopg2_binary-2.9.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1011eeb0c51e5b9ea1016f0f45fa23aca63966a4c0afcf0340ccabe85a9f65bd"}, + {file = "psycopg2_binary-2.9.7-cp311-cp311-win32.whl", hash = "sha256:ded8e15f7550db9e75c60b3d9fcbc7737fea258a0f10032cdb7edc26c2a671fd"}, + {file = "psycopg2_binary-2.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:8a136c8aaf6615653450817a7abe0fc01e4ea720ae41dfb2823eccae4b9062a3"}, + {file = "psycopg2_binary-2.9.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2dec5a75a3a5d42b120e88e6ed3e3b37b46459202bb8e36cd67591b6e5feebc1"}, + {file = "psycopg2_binary-2.9.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc10da7e7df3380426521e8c1ed975d22df678639da2ed0ec3244c3dc2ab54c8"}, + {file = "psycopg2_binary-2.9.7-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee919b676da28f78f91b464fb3e12238bd7474483352a59c8a16c39dfc59f0c5"}, + {file = 
"psycopg2_binary-2.9.7-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb1c0e682138f9067a58fc3c9a9bf1c83d8e08cfbee380d858e63196466d5c86"}, + {file = "psycopg2_binary-2.9.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00d8db270afb76f48a499f7bb8fa70297e66da67288471ca873db88382850bf4"}, + {file = "psycopg2_binary-2.9.7-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9b0c2b466b2f4d89ccc33784c4ebb1627989bd84a39b79092e560e937a11d4ac"}, + {file = "psycopg2_binary-2.9.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:51d1b42d44f4ffb93188f9b39e6d1c82aa758fdb8d9de65e1ddfe7a7d250d7ad"}, + {file = "psycopg2_binary-2.9.7-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:11abdbfc6f7f7dea4a524b5f4117369b0d757725798f1593796be6ece20266cb"}, + {file = "psycopg2_binary-2.9.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:f02f4a72cc3ab2565c6d9720f0343cb840fb2dc01a2e9ecb8bc58ccf95dc5c06"}, + {file = "psycopg2_binary-2.9.7-cp37-cp37m-win32.whl", hash = "sha256:81d5dd2dd9ab78d31a451e357315f201d976c131ca7d43870a0e8063b6b7a1ec"}, + {file = "psycopg2_binary-2.9.7-cp37-cp37m-win_amd64.whl", hash = "sha256:62cb6de84d7767164a87ca97e22e5e0a134856ebcb08f21b621c6125baf61f16"}, + {file = "psycopg2_binary-2.9.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:59f7e9109a59dfa31efa022e94a244736ae401526682de504e87bd11ce870c22"}, + {file = "psycopg2_binary-2.9.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:95a7a747bdc3b010bb6a980f053233e7610276d55f3ca506afff4ad7749ab58a"}, + {file = "psycopg2_binary-2.9.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c721ee464e45ecf609ff8c0a555018764974114f671815a0a7152aedb9f3343"}, + {file = "psycopg2_binary-2.9.7-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4f37bbc6588d402980ffbd1f3338c871368fb4b1cfa091debe13c68bb3852b3"}, + {file = "psycopg2_binary-2.9.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac83ab05e25354dad798401babaa6daa9577462136ba215694865394840e31f8"}, + {file = "psycopg2_binary-2.9.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:024eaeb2a08c9a65cd5f94b31ace1ee3bb3f978cd4d079406aef85169ba01f08"}, + {file = "psycopg2_binary-2.9.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1c31c2606ac500dbd26381145684d87730a2fac9a62ebcfbaa2b119f8d6c19f4"}, + {file = "psycopg2_binary-2.9.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:42a62ef0e5abb55bf6ffb050eb2b0fcd767261fa3faf943a4267539168807522"}, + {file = "psycopg2_binary-2.9.7-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:7952807f95c8eba6a8ccb14e00bf170bb700cafcec3924d565235dffc7dc4ae8"}, + {file = "psycopg2_binary-2.9.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e02bc4f2966475a7393bd0f098e1165d470d3fa816264054359ed4f10f6914ea"}, + {file = "psycopg2_binary-2.9.7-cp38-cp38-win32.whl", hash = "sha256:fdca0511458d26cf39b827a663d7d87db6f32b93efc22442a742035728603d5f"}, + {file = "psycopg2_binary-2.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:d0b16e5bb0ab78583f0ed7ab16378a0f8a89a27256bb5560402749dbe8a164d7"}, + {file = "psycopg2_binary-2.9.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6822c9c63308d650db201ba22fe6648bd6786ca6d14fdaf273b17e15608d0852"}, + {file = "psycopg2_binary-2.9.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f94cb12150d57ea433e3e02aabd072205648e86f1d5a0a692d60242f7809b15"}, + {file = "psycopg2_binary-2.9.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a5ee89587696d808c9a00876065d725d4ae606f5f7853b961cdbc348b0f7c9a1"}, + {file = "psycopg2_binary-2.9.7-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad5ec10b53cbb57e9a2e77b67e4e4368df56b54d6b00cc86398578f1c635f329"}, + {file = "psycopg2_binary-2.9.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:642df77484b2dcaf87d4237792246d8068653f9e0f5c025e2c692fc56b0dda70"}, + {file = "psycopg2_binary-2.9.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6a8b575ac45af1eaccbbcdcf710ab984fd50af048fe130672377f78aaff6fc1"}, + {file = "psycopg2_binary-2.9.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f955aa50d7d5220fcb6e38f69ea126eafecd812d96aeed5d5f3597f33fad43bb"}, + {file = "psycopg2_binary-2.9.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ad26d4eeaa0d722b25814cce97335ecf1b707630258f14ac4d2ed3d1d8415265"}, + {file = "psycopg2_binary-2.9.7-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ced63c054bdaf0298f62681d5dcae3afe60cbae332390bfb1acf0e23dcd25fc8"}, + {file = "psycopg2_binary-2.9.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2b04da24cbde33292ad34a40db9832a80ad12de26486ffeda883413c9e1b1d5e"}, + {file = "psycopg2_binary-2.9.7-cp39-cp39-win32.whl", hash = "sha256:18f12632ab516c47c1ac4841a78fddea6508a8284c7cf0f292cb1a523f2e2379"}, + {file = "psycopg2_binary-2.9.7-cp39-cp39-win_amd64.whl", hash = "sha256:eb3b8d55924a6058a26db69fb1d3e7e32695ff8b491835ba9f479537e14dcf9f"}, +] + +[[package]] +name = "pyarrow" +version = "13.0.0" +description = "Python library for Apache Arrow" +optional = true +python-versions = ">=3.8" +files = [ + {file = "pyarrow-13.0.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:1afcc2c33f31f6fb25c92d50a86b7a9f076d38acbcb6f9e74349636109550148"}, + {file = "pyarrow-13.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:70fa38cdc66b2fc1349a082987f2b499d51d072faaa6b600f71931150de2e0e3"}, + {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd57b13a6466822498238877892a9b287b0a58c2e81e4bdb0b596dbb151cbb73"}, + {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ce69f7bf01de2e2764e14df45b8404fc6f1a5ed9871e8e08a12169f87b7a26"}, + {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:588f0d2da6cf1b1680974d63be09a6530fd1bd825dc87f76e162404779a157dc"}, + {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:6241afd72b628787b4abea39e238e3ff9f34165273fad306c7acf780dd850956"}, + {file = "pyarrow-13.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:fda7857e35993673fcda603c07d43889fca60a5b254052a462653f8656c64f44"}, + {file = "pyarrow-13.0.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:aac0ae0146a9bfa5e12d87dda89d9ef7c57a96210b899459fc2f785303dcbb67"}, + {file = "pyarrow-13.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d7759994217c86c161c6a8060509cfdf782b952163569606bb373828afdd82e8"}, + {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:868a073fd0ff6468ae7d869b5fc1f54de5c4255b37f44fb890385eb68b68f95d"}, + {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51be67e29f3cfcde263a113c28e96aa04362ed8229cb7c6e5f5c719003659d33"}, + {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:d1b4e7176443d12610874bb84d0060bf080f000ea9ed7c84b2801df851320295"}, + {file = 
"pyarrow-13.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:69b6f9a089d116a82c3ed819eea8fe67dae6105f0d81eaf0fdd5e60d0c6e0944"}, + {file = "pyarrow-13.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:ab1268db81aeb241200e321e220e7cd769762f386f92f61b898352dd27e402ce"}, + {file = "pyarrow-13.0.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:ee7490f0f3f16a6c38f8c680949551053c8194e68de5046e6c288e396dccee80"}, + {file = "pyarrow-13.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3ad79455c197a36eefbd90ad4aa832bece7f830a64396c15c61a0985e337287"}, + {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68fcd2dc1b7d9310b29a15949cdd0cb9bc34b6de767aff979ebf546020bf0ba0"}, + {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc6fd330fd574c51d10638e63c0d00ab456498fc804c9d01f2a61b9264f2c5b2"}, + {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e66442e084979a97bb66939e18f7b8709e4ac5f887e636aba29486ffbf373763"}, + {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:0f6eff839a9e40e9c5610d3ff8c5bdd2f10303408312caf4c8003285d0b49565"}, + {file = "pyarrow-13.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b30a27f1cddf5c6efcb67e598d7823a1e253d743d92ac32ec1eb4b6a1417867"}, + {file = "pyarrow-13.0.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:09552dad5cf3de2dc0aba1c7c4b470754c69bd821f5faafc3d774bedc3b04bb7"}, + {file = "pyarrow-13.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3896ae6c205d73ad192d2fc1489cd0edfab9f12867c85b4c277af4d37383c18c"}, + {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6647444b21cb5e68b593b970b2a9a07748dd74ea457c7dadaa15fd469c48ada1"}, + {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47663efc9c395e31d09c6aacfa860f4473815ad6804311c5433f7085415d62a7"}, + {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:b9ba6b6d34bd2563345488cf444510588ea42ad5613df3b3509f48eb80250afd"}, + {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:d00d374a5625beeb448a7fa23060df79adb596074beb3ddc1838adb647b6ef09"}, + {file = "pyarrow-13.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:c51afd87c35c8331b56f796eff954b9c7f8d4b7fef5903daf4e05fcf017d23a8"}, + {file = "pyarrow-13.0.0.tar.gz", hash = "sha256:83333726e83ed44b0ac94d8d7a21bbdee4a05029c3b1e8db58a863eec8fd8a33"}, +] + +[package.dependencies] +numpy = ">=1.16.6" + +[[package]] +name = "pyasn1" +version = "0.5.0" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = true +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "pyasn1-0.5.0-py2.py3-none-any.whl", hash = "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57"}, + {file = "pyasn1-0.5.0.tar.gz", hash = "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.3.0" +description = "A collection of ASN.1-based protocols modules" +optional = true +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"}, + {file = "pyasn1_modules-0.3.0.tar.gz", hash = "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c"}, +] + 
+[package.dependencies] +pyasn1 = ">=0.4.6,<0.6.0" + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pydantic" +version = "2.3.0" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic-2.3.0-py3-none-any.whl", hash = "sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81"}, + {file = "pydantic-2.3.0.tar.gz", hash = "sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.6.3" +typing-extensions = ">=4.6.1" + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.6.3" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic_core-2.6.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad"}, + {file = "pydantic_core-2.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb"}, + {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd"}, + {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e"}, + {file = "pydantic_core-2.6.3-cp310-none-win32.whl", hash = "sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7"}, + {file = "pydantic_core-2.6.3-cp310-none-win_amd64.whl", hash = "sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad"}, + {file = "pydantic_core-2.6.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973"}, + {file = "pydantic_core-2.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b"}, + {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6"}, + {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50"}, + {file = "pydantic_core-2.6.3-cp311-none-win32.whl", hash = "sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8"}, + {file = "pydantic_core-2.6.3-cp311-none-win_amd64.whl", hash = "sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950"}, + {file = "pydantic_core-2.6.3-cp311-none-win_arm64.whl", hash = "sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2"}, + {file = "pydantic_core-2.6.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5"}, + {file = "pydantic_core-2.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282"}, + {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d"}, + {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa"}, + {file = "pydantic_core-2.6.3-cp312-none-win32.whl", hash = "sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1"}, + {file = "pydantic_core-2.6.3-cp312-none-win_amd64.whl", hash = 
"sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881"}, + {file = "pydantic_core-2.6.3-cp312-none-win_arm64.whl", hash = "sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6"}, + {file = "pydantic_core-2.6.3-cp37-none-win32.whl", hash = "sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b"}, + {file = "pydantic_core-2.6.3-cp37-none-win_amd64.whl", hash = "sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525"}, + {file = "pydantic_core-2.6.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170"}, + {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec"}, + {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb"}, + {file = "pydantic_core-2.6.3-cp38-none-win32.whl", hash = "sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc"}, + {file = "pydantic_core-2.6.3-cp38-none-win_amd64.whl", hash = "sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378"}, + {file = "pydantic_core-2.6.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465"}, + {file = "pydantic_core-2.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3"}, + {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76"}, + {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef"}, + {file = "pydantic_core-2.6.3-cp39-none-win32.whl", hash = "sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a"}, + {file = "pydantic_core-2.6.3-cp39-none-win_amd64.whl", hash = "sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd"}, + {file = 
"pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149"}, + {file = "pydantic_core-2.6.3.tar.gz", hash = "sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pygments" +version = "2.16.1" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"}, + {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"}, +] + +[package.extras] +plugins = ["importlib-metadata"] + +[[package]] +name = "pyjwt" +version = "2.8.0" +description = "JSON Web Token implementation in Python" +optional = true +python-versions = ">=3.7" +files = [ + {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"}, + {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"}, +] + +[package.dependencies] +cryptography = {version = ">=3.4.0", optional = true, markers = "extra == \"crypto\""} + +[package.extras] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] + +[[package]] +name = "pyparsing" +version = "3.1.1" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, + {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pyproject-hooks" +version = "1.0.0" +description = "Wrappers to call pyproject.toml-based build backend hooks." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "pyproject_hooks-1.0.0-py3-none-any.whl", hash = "sha256:283c11acd6b928d2f6a7c73fa0d01cb2bdc5f07c57a2eeb6e83d5e56b97976f8"}, + {file = "pyproject_hooks-1.0.0.tar.gz", hash = "sha256:f271b298b97f5955d53fb12b72c1fb1948c22c1a6b70b315c54cedaca0264ef5"}, +] + +[package.dependencies] +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "pytest" +version = "7.4.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.4.2-py3-none-any.whl", hash = "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002"}, + {file = "pytest-7.4.2.tar.gz", hash = "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-checkdocs" +version = "2.10.1" +description = "check the README when running tests" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-checkdocs-2.10.1.tar.gz", hash = "sha256:393868583f2d0314f8c5828fd94f7d28699543f6a0a925356d7e274e2952297e"}, + {file = "pytest_checkdocs-2.10.1-py3-none-any.whl", hash = "sha256:f069d6408633697023298ebf66c9bb1cb915c3ae5f047457b507229a4784e153"}, +] + +[package.dependencies] +build = "*" +docutils = ">=0.15" +importlib-metadata = {version = ">=4", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff", "types-docutils"] + +[[package]] +name = "pytest-mock" +version = "3.11.1" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-mock-3.11.1.tar.gz", hash = "sha256:7f6b125602ac6d743e523ae0bfa71e1a697a2f5534064528c6ff84c2f7c2fc7f"}, + {file = "pytest_mock-3.11.1-py3-none-any.whl", hash = "sha256:21c279fff83d70763b05f8874cc9cfb3fcacd6d354247a976f9529d19f9acf39"}, +] + +[package.dependencies] +pytest = ">=5.0" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-snappy" +version = "0.6.1" +description = "Python library for the snappy compression library from Google" +optional = true +python-versions = "*" +files = [ + {file = "python-snappy-0.6.1.tar.gz", hash = 
"sha256:b6a107ab06206acc5359d4c5632bd9b22d448702a79b3169b0c62e0fb808bb2a"}, + {file = "python_snappy-0.6.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b7f920eaf46ebf41bd26f9df51c160d40f9e00b7b48471c3438cb8d027f7fb9b"}, + {file = "python_snappy-0.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4ec533a8c1f8df797bded662ec3e494d225b37855bb63eb0d75464a07947477c"}, + {file = "python_snappy-0.6.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6f8bf4708a11b47517baf962f9a02196478bbb10fdb9582add4aa1459fa82380"}, + {file = "python_snappy-0.6.1-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8d0c019ee7dcf2c60e240877107cddbd95a5b1081787579bf179938392d66480"}, + {file = "python_snappy-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb18d9cd7b3f35a2f5af47bb8ed6a5bdbf4f3ddee37f3daade4ab7864c292f5b"}, + {file = "python_snappy-0.6.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b265cde49774752aec9ca7f5d272e3f98718164afc85521622a8a5394158a2b5"}, + {file = "python_snappy-0.6.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d017775851a778ec9cc32651c4464079d06d927303c2dde9ae9830ccf6fe94e1"}, + {file = "python_snappy-0.6.1-cp310-cp310-win32.whl", hash = "sha256:8277d1f6282463c40761f802b742f833f9f2449fcdbb20a96579aa05c8feb614"}, + {file = "python_snappy-0.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:2aaaf618c68d8c9daebc23a20436bd01b09ee70d7fbf7072b7f38b06d2fab539"}, + {file = "python_snappy-0.6.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:277757d5dad4e239dc1417438a0871b65b1b155beb108888e7438c27ffc6a8cc"}, + {file = "python_snappy-0.6.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e066a0586833d610c4bbddba0be5ba0e3e4f8e0bc5bb6d82103d8f8fc47bb59a"}, + {file = "python_snappy-0.6.1-cp36-cp36m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0d489b50f49433494160c45048fe806de6b3aeab0586e497ebd22a0bab56e427"}, + {file = "python_snappy-0.6.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:463fd340a499d47b26ca42d2f36a639188738f6e2098c6dbf80aef0e60f461e1"}, + {file = "python_snappy-0.6.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9837ac1650cc68d22a3cf5f15fb62c6964747d16cecc8b22431f113d6e39555d"}, + {file = "python_snappy-0.6.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e973e637112391f05581f427659c05b30b6843bc522a65be35ac7b18ce3dedd"}, + {file = "python_snappy-0.6.1-cp36-cp36m-win32.whl", hash = "sha256:c20498bd712b6e31a4402e1d027a1cd64f6a4a0066a3fe3c7344475886d07fdf"}, + {file = "python_snappy-0.6.1-cp36-cp36m-win_amd64.whl", hash = "sha256:59e975be4206cc54d0a112ef72fa3970a57c2b1bcc2c97ed41d6df0ebe518228"}, + {file = "python_snappy-0.6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2a7e528ab6e09c0d67dcb61a1730a292683e5ff9bb088950638d3170cf2a0a54"}, + {file = "python_snappy-0.6.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:39692bedbe0b717001a99915ac0eb2d9d0bad546440d392a2042b96d813eede1"}, + {file = "python_snappy-0.6.1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6a7620404da966f637b9ce8d4d3d543d363223f7a12452a575189c5355fc2d25"}, + {file = "python_snappy-0.6.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7778c224efc38a40d274da4eb82a04cac27aae20012372a7db3c4bbd8926c4d4"}, + {file = 
"python_snappy-0.6.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d029f7051ec1bbeaa3e03030b6d8ed47ceb69cae9016f493c802a08af54e026"}, + {file = "python_snappy-0.6.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a0ad38bc98d0b0497a0b0dbc29409bcabfcecff4511ed7063403c86de16927bc"}, + {file = "python_snappy-0.6.1-cp37-cp37m-win32.whl", hash = "sha256:5a453c45178d7864c1bdd6bfe0ee3ed2883f63b9ba2c9bb967c6b586bf763f96"}, + {file = "python_snappy-0.6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9f0c0d88b84259f93c3aa46398680646f2c23e43394779758d9f739c34e15295"}, + {file = "python_snappy-0.6.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5bb05c28298803a74add08ba496879242ef159c75bc86a5406fac0ffc7dd021b"}, + {file = "python_snappy-0.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9eac51307c6a1a38d5f86ebabc26a889fddf20cbba7a116ccb54ba1446601d5b"}, + {file = "python_snappy-0.6.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:88b6ea78b83d2796f330b0af1b70cdd3965dbdab02d8ac293260ec2c8fe340ee"}, + {file = "python_snappy-0.6.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8c07220408d3268e8268c9351c5c08041bc6f8c6172e59d398b71020df108541"}, + {file = "python_snappy-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4038019b1bcaadde726a57430718394076c5a21545ebc5badad2c045a09546cf"}, + {file = "python_snappy-0.6.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc96668d9c7cc656609764275c5f8da58ef56d89bdd6810f6923d36497468ff7"}, + {file = "python_snappy-0.6.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf5bb9254e1c38aacf253d510d3d9be631bba21f3d068b17672b38b5cbf2fff5"}, + {file = "python_snappy-0.6.1-cp38-cp38-win32.whl", hash = "sha256:eaf905a580f2747c4a474040a5063cd5e0cc3d1d2d6edb65f28196186493ad4a"}, + {file = "python_snappy-0.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:546c1a7470ecbf6239101e9aff0f709b68ca0f0268b34d9023019a55baa1f7c6"}, + {file = "python_snappy-0.6.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e3a013895c64352b49d0d8e107a84f99631b16dbab156ded33ebf0becf56c8b2"}, + {file = "python_snappy-0.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3fb9a88a4dd6336488f3de67ce75816d0d796dce53c2c6e4d70e0b565633c7fd"}, + {file = "python_snappy-0.6.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:735cd4528c55dbe4516d6d2b403331a99fc304f8feded8ae887cf97b67d589bb"}, + {file = "python_snappy-0.6.1-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:90b0186516b7a101c14764b0c25931b741fb0102f21253eff67847b4742dfc72"}, + {file = "python_snappy-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a993dc8aadd901915a510fe6af5f20ae4256f527040066c22a154db8946751f"}, + {file = "python_snappy-0.6.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:530bfb9efebcc1aab8bb4ebcbd92b54477eed11f6cf499355e882970a6d3aa7d"}, + {file = "python_snappy-0.6.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5843feb914796b1f0405ccf31ea0fb51034ceb65a7588edfd5a8250cb369e3b2"}, + {file = "python_snappy-0.6.1-cp39-cp39-win32.whl", hash = "sha256:66c80e9b366012dbee262bb1869e4fc5ba8786cda85928481528bc4a72ec2ee8"}, + {file = "python_snappy-0.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:4d3cafdf454354a621c8ab7408e45aa4e9d5c0b943b61ff4815f71ca6bdf0130"}, + {file = 
"python_snappy-0.6.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:586724a0276d7a6083a17259d0b51622e492289a9998848a1b01b6441ca12b2f"}, + {file = "python_snappy-0.6.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2be4f4550acd484912441f5f1209ba611ac399aac9355fee73611b9a0d4f949c"}, + {file = "python_snappy-0.6.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bdb6942180660bda7f7d01f4c0def3cfc72b1c6d99aad964801775a3e379aba"}, + {file = "python_snappy-0.6.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:03bb511380fca2a13325b6f16fe8234c8e12da9660f0258cd45d9a02ffc916af"}, +] + +[[package]] +name = "pytz" +version = "2023.3.post1" +description = "World timezone definitions, modern and historical" +optional = true +python-versions = "*" +files = [ + {file = "pytz-2023.3.post1-py2.py3-none-any.whl", hash = "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"}, + {file = "pytz-2023.3.post1.tar.gz", hash = "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b"}, +] + +[[package]] +name = "pywin32" +version = "306" +description = "Python for Window Extensions" +optional = true +python-versions = "*" +files = [ + {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, + {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, + {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, + {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, + {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, + {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, + {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, + {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, + {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, + {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, + {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, + {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, + {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, + {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + 
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = 
"sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "ray" +version = "2.7.0" +description = "Ray provides a simple, universal API for building distributed applications." +optional = true +python-versions = "*" +files = [ + {file = "ray-2.7.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:bc911655908b61b2e9f59b8df158fcc62cd32080c468b484b539ebf0a4111d04"}, + {file = "ray-2.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0ee8c14e1521559cd5802bfad3f0aba4a77afdfba57dd446162a7449c6e8ff68"}, + {file = "ray-2.7.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:ebde44af7d479ede21d1c2e68b5ccd8264e18df6e4f3c216d9e99c31e819bde6"}, + {file = "ray-2.7.0-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:b83621f5d2d4079e6ae624c3bf30046a4fefa0ea7ea5e4a4dfe4b50c580b3768"}, + {file = "ray-2.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:8e1b06abba6e227b8dde1ad861c587fb2608a6970d270e4755cd24a6f37ed565"}, + {file = "ray-2.7.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:5442d48719f033831a324f05b332d6e7181970d721e9504be2091cc9d9735394"}, + {file = "ray-2.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ca8225878cce7b9e2d0ca9668d9370893a7cee35629d11a3889a1b66a0007218"}, + {file = "ray-2.7.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:a3f59dbb0780f9fa11f5bf96bef853b4cb95245456d4400e1c7bf2e514d12ab2"}, + {file = "ray-2.7.0-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:8384b3f30bc1446ef810e9e894afa03238c5ac40d3c40c0740d82f347112015d"}, + {file = "ray-2.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:8d4530e7024375505552dabd3f4441fc9ac7a5562365a81ba9afa14185433879"}, + {file = "ray-2.7.0-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:c491b8051eef82b77d136c48a23d16485c0e54233303ccf68e9fe69a06c517e6"}, + {file = "ray-2.7.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:1684c434886cb7b263cdf98ed39d75dec343e949f7b14f3385d83bfe70ee8c80"}, + {file = "ray-2.7.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:856a9ae164b9b0aeaad54f3e78986eb19900ed3c74e26f51b02a7d8826c97e59"}, + {file = "ray-2.7.0-cp37-cp37m-win_amd64.whl", hash = "sha256:34925a90b6239de42592bb4524dcbdc59a9c65f1f74ad4d9f97f636bd59c73d7"}, + {file = "ray-2.7.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:60db240f37d80a80492e09a8d1e29b79d034431c6fcb651401e9e2d24d850793"}, + {file = "ray-2.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:835155fdeb8698eae426f3d9416e6b8165197fe5c1c74e1b02a429fc7f4ddcd2"}, + {file = "ray-2.7.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:442b7568946081d38c8addbc528e7b09fc1ee25453b4800c86b7e5ba4bce9dd3"}, + {file = "ray-2.7.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:3825292b777b423e2cd34bf66e8e1e7701b04c6a5308f9f291ad5929b289dc47"}, + {file = "ray-2.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:ce700322662946ad5c62a39b78e81feebcb855d378c49f5df6477c22f0ac1e5a"}, + {file = "ray-2.7.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:327c23aac5dd26ee4abe6cee70320322d63fdf97c6028fbb9555724b46a8f3e3"}, + {file = "ray-2.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a09021d45312ab7a44109b251984718b65fbff77df0b55e30e651193cdf42bff"}, + {file = "ray-2.7.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f05fcb609962d14f4d23cc88a9d07cafa7077ce3c5d5ee99cd08a19067b7eecf"}, + {file = "ray-2.7.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:0e0f7dbeb4444940c72b64fdecd6f331593466914b2dffeed03ce97225acec14"}, + {file = "ray-2.7.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:29a0866316756ae18e232dd074adbf408dcdabe95d135a9a96b9a8c24393c983"}, +] + +[package.dependencies] +aiosignal = "*" +click = ">=7.0" +filelock = "*" +frozenlist = "*" +jsonschema = "*" +msgpack = ">=1.0.0,<2.0.0" +numpy = [ + {version = ">=1.16", markers = "python_version < \"3.9\""}, + {version = ">=1.19.3", markers = "python_version >= \"3.9\""}, +] +packaging = "*" +protobuf = ">=3.15.3,<3.19.5 || >3.19.5" +pyyaml = "*" +requests = "*" + +[package.extras] +air = ["aiohttp (>=3.7)", "aiohttp-cors", "aiorwlock", "colorful", "fastapi", "fsspec", "gpustat (>=1.0.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "numpy (>=1.20)", "opencensus", "pandas", "pandas (>=1.3)", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "requests", "smart-open", "starlette", "tensorboardX (>=1.9)", "uvicorn", "virtualenv (>=20.0.24,<20.21.1)", "watchfiles"] +all = ["aiohttp (>=3.7)", "aiohttp-cors", "aiorwlock", "colorful", "dm-tree", "fastapi", "fsspec", "gpustat (>=1.0.0)", "grpcio (!=1.56.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "gymnasium (==0.28.1)", "lz4", "numpy (>=1.20)", "opencensus", "opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk", "pandas", "pandas (>=1.3)", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyyaml", "ray-cpp (==2.7.0)", "requests", "rich", "scikit-image", "scipy", "smart-open", "starlette", "tensorboardX (>=1.9)", "typer", "uvicorn", "virtualenv (>=20.0.24,<20.21.1)", "watchfiles"] +client = ["grpcio (!=1.56.0)"] +cpp = ["ray-cpp (==2.7.0)"] +data = ["fsspec", "numpy (>=1.20)", "pandas (>=1.3)", "pyarrow (>=6.0.1)"] +default = ["aiohttp (>=3.7)", "aiohttp-cors", "colorful", "gpustat (>=1.0.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "opencensus", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pydantic (<2)", "requests", "smart-open", "virtualenv (>=20.0.24,<20.21.1)"] +observability = ["opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk"] +rllib = ["dm-tree", "fsspec", "gymnasium (==0.28.1)", "lz4", "pandas", "pyarrow (>=6.0.1)", "pyyaml", "requests", "rich", "scikit-image", "scipy", "tensorboardX (>=1.9)", "typer"] +serve = ["aiohttp (>=3.7)", "aiohttp-cors", "aiorwlock", "colorful", "fastapi", "gpustat (>=1.0.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "opencensus", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pydantic (<2)", "requests", "smart-open", "starlette", "uvicorn", "virtualenv (>=20.0.24,<20.21.1)", "watchfiles"] +serve-grpc = ["aiohttp (>=3.7)", "aiohttp-cors", "aiorwlock", "colorful", "fastapi", "gpustat (>=1.0.0)", "grpcio (>=1.32.0)", "grpcio (>=1.42.0)", "opencensus", "prometheus-client (>=0.7.1)", "py-spy (>=0.2.0)", "pydantic (<2)", "requests", "smart-open", "starlette", "uvicorn", "virtualenv (>=20.0.24,<20.21.1)", "watchfiles"] +train = ["fsspec", "pandas", "pyarrow (>=6.0.1)", "requests", "tensorboardX (>=1.9)"] +tune = ["fsspec", "pandas", "pyarrow (>=6.0.1)", "requests", "tensorboardX (>=1.9)"] + +[[package]] +name = "referencing" +version = "0.30.2" +description = "JSON Referencing + Python" +optional = true +python-versions = ">=3.8" +files = [ + {file = "referencing-0.30.2-py3-none-any.whl", hash = "sha256:449b6669b6121a9e96a7f9e410b245d471e8d48964c67113ce9afe50c8dd7bdf"}, + {file = "referencing-0.30.2.tar.gz", hash = "sha256:794ad8003c65938edcdbc027f1933215e0d0ccc0291e3ce20a4d87432b59efc0"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + +[[package]] +name = "requests" +version = 
"2.31.0" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-mock" +version = "1.11.0" +description = "Mock out responses from the requests package" +optional = false +python-versions = "*" +files = [ + {file = "requests-mock-1.11.0.tar.gz", hash = "sha256:ef10b572b489a5f28e09b708697208c4a3b2b89ef80a9f01584340ea357ec3c4"}, + {file = "requests_mock-1.11.0-py2.py3-none-any.whl", hash = "sha256:f7fae383f228633f6bececebdab236c478ace2284d6292c6e7e2867b9ab74d15"}, +] + +[package.dependencies] +requests = ">=2.3,<3" +six = "*" + +[package.extras] +fixture = ["fixtures"] +test = ["fixtures", "mock", "purl", "pytest", "requests-futures", "sphinx", "testtools"] + +[[package]] +name = "requests-oauthlib" +version = "1.3.1" +description = "OAuthlib authentication support for Requests." +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"}, + {file = "requests_oauthlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5"}, +] + +[package.dependencies] +oauthlib = ">=3.0.0" +requests = ">=2.0.0" + +[package.extras] +rsa = ["oauthlib[signedtoken] (>=3.0.0)"] + +[[package]] +name = "responses" +version = "0.23.3" +description = "A utility library for mocking out the `requests` Python library." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "responses-0.23.3-py3-none-any.whl", hash = "sha256:e6fbcf5d82172fecc0aa1860fd91e58cbfd96cee5e96da5b63fa6eb3caa10dd3"}, + {file = "responses-0.23.3.tar.gz", hash = "sha256:205029e1cb334c21cb4ec64fc7599be48b859a0fd381a42443cdd600bfe8b16a"}, +] + +[package.dependencies] +pyyaml = "*" +requests = ">=2.30.0,<3.0" +types-PyYAML = "*" +urllib3 = ">=1.25.10,<3.0" + +[package.extras] +tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli", "tomli-w", "types-requests"] + +[[package]] +name = "rich" +version = "13.5.3" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.5.3-py3-none-any.whl", hash = "sha256:9257b468badc3d347e146a4faa268ff229039d4c2d176ab0cffb4c4fbc73d5d9"}, + {file = "rich-13.5.3.tar.gz", hash = "sha256:87b43e0543149efa1253f485cd845bb7ee54df16c9617b8a893650ab84b4acb6"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "rpds-py" +version = "0.10.3" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = true +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.10.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:485747ee62da83366a44fbba963c5fe017860ad408ccd6cd99aa66ea80d32b2e"}, + {file = "rpds_py-0.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c55f9821f88e8bee4b7a72c82cfb5ecd22b6aad04033334f33c329b29bfa4da0"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3b52a67ac66a3a64a7e710ba629f62d1e26ca0504c29ee8cbd99b97df7079a8"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3aed39db2f0ace76faa94f465d4234aac72e2f32b009f15da6492a561b3bbebd"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:271c360fdc464fe6a75f13ea0c08ddf71a321f4c55fc20a3fe62ea3ef09df7d9"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef5fddfb264e89c435be4adb3953cef5d2936fdeb4463b4161a6ba2f22e7b740"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a771417c9c06c56c9d53d11a5b084d1de75de82978e23c544270ab25e7c066ff"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:52b5cbc0469328e58180021138207e6ec91d7ca2e037d3549cc9e34e2187330a"}, + {file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6ac3fefb0d168c7c6cab24fdfc80ec62cd2b4dfd9e65b84bdceb1cb01d385c33"}, + {file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:8d54bbdf5d56e2c8cf81a1857250f3ea132de77af543d0ba5dce667183b61fec"}, + {file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cd2163f42868865597d89399a01aa33b7594ce8e2c4a28503127c81a2f17784e"}, + {file = "rpds_py-0.10.3-cp310-none-win32.whl", hash = "sha256:ea93163472db26ac6043e8f7f93a05d9b59e0505c760da2a3cd22c7dd7111391"}, + {file = "rpds_py-0.10.3-cp310-none-win_amd64.whl", hash = "sha256:7cd020b1fb41e3ab7716d4d2c3972d4588fdfbab9bfbbb64acc7078eccef8860"}, + {file = 
"rpds_py-0.10.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:1d9b5ee46dcb498fa3e46d4dfabcb531e1f2e76b477e0d99ef114f17bbd38453"}, + {file = "rpds_py-0.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:563646d74a4b4456d0cf3b714ca522e725243c603e8254ad85c3b59b7c0c4bf0"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e626b864725680cd3904414d72e7b0bd81c0e5b2b53a5b30b4273034253bb41f"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:485301ee56ce87a51ccb182a4b180d852c5cb2b3cb3a82f7d4714b4141119d8c"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:42f712b4668831c0cd85e0a5b5a308700fe068e37dcd24c0062904c4e372b093"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c9141af27a4e5819d74d67d227d5047a20fa3c7d4d9df43037a955b4c748ec5"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef750a20de1b65657a1425f77c525b0183eac63fe7b8f5ac0dd16f3668d3e64f"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e1a0ffc39f51aa5f5c22114a8f1906b3c17eba68c5babb86c5f77d8b1bba14d1"}, + {file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f4c179a7aeae10ddf44c6bac87938134c1379c49c884529f090f9bf05566c836"}, + {file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:176287bb998fd1e9846a9b666e240e58f8d3373e3bf87e7642f15af5405187b8"}, + {file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6446002739ca29249f0beaaf067fcbc2b5aab4bc7ee8fb941bd194947ce19aff"}, + {file = "rpds_py-0.10.3-cp311-none-win32.whl", hash = "sha256:c7aed97f2e676561416c927b063802c8a6285e9b55e1b83213dfd99a8f4f9e48"}, + {file = "rpds_py-0.10.3-cp311-none-win_amd64.whl", hash = "sha256:8bd01ff4032abaed03f2db702fa9a61078bee37add0bd884a6190b05e63b028c"}, + {file = "rpds_py-0.10.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:4cf0855a842c5b5c391dd32ca273b09e86abf8367572073bd1edfc52bc44446b"}, + {file = "rpds_py-0.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:69b857a7d8bd4f5d6e0db4086da8c46309a26e8cefdfc778c0c5cc17d4b11e08"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:975382d9aa90dc59253d6a83a5ca72e07f4ada3ae3d6c0575ced513db322b8ec"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:35fbd23c1c8732cde7a94abe7fb071ec173c2f58c0bd0d7e5b669fdfc80a2c7b"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:106af1653007cc569d5fbb5f08c6648a49fe4de74c2df814e234e282ebc06957"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce5e7504db95b76fc89055c7f41e367eaadef5b1d059e27e1d6eabf2b55ca314"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aca759ada6b1967fcfd4336dcf460d02a8a23e6abe06e90ea7881e5c22c4de6"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b5d4bdd697195f3876d134101c40c7d06d46c6ab25159ed5cbd44105c715278a"}, + {file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a657250807b6efd19b28f5922520ae002a54cb43c2401e6f3d0230c352564d25"}, + {file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:177c9dd834cdf4dc39c27436ade6fdf9fe81484758885f2d616d5d03c0a83bd2"}, + {file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e22491d25f97199fc3581ad8dd8ce198d8c8fdb8dae80dea3512e1ce6d5fa99f"}, + {file = "rpds_py-0.10.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:2f3e1867dd574014253b4b8f01ba443b9c914e61d45f3674e452a915d6e929a3"}, + {file = "rpds_py-0.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c22211c165166de6683de8136229721f3d5c8606cc2c3d1562da9a3a5058049c"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40bc802a696887b14c002edd43c18082cb7b6f9ee8b838239b03b56574d97f71"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e271dd97c7bb8eefda5cca38cd0b0373a1fea50f71e8071376b46968582af9b"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:95cde244e7195b2c07ec9b73fa4c5026d4a27233451485caa1cd0c1b55f26dbd"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08a80cf4884920863623a9ee9a285ee04cef57ebedc1cc87b3e3e0f24c8acfe5"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763ad59e105fca09705d9f9b29ecffb95ecdc3b0363be3bb56081b2c6de7977a"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:187700668c018a7e76e89424b7c1042f317c8df9161f00c0c903c82b0a8cac5c"}, + {file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:5267cfda873ad62591b9332fd9472d2409f7cf02a34a9c9cb367e2c0255994bf"}, + {file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:2ed83d53a8c5902ec48b90b2ac045e28e1698c0bea9441af9409fc844dc79496"}, + {file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:255f1a10ae39b52122cce26ce0781f7a616f502feecce9e616976f6a87992d6b"}, + {file = "rpds_py-0.10.3-cp38-none-win32.whl", hash = "sha256:a019a344312d0b1f429c00d49c3be62fa273d4a1094e1b224f403716b6d03be1"}, + {file = "rpds_py-0.10.3-cp38-none-win_amd64.whl", hash = "sha256:efb9ece97e696bb56e31166a9dd7919f8f0c6b31967b454718c6509f29ef6fee"}, + {file = "rpds_py-0.10.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:570cc326e78ff23dec7f41487aa9c3dffd02e5ee9ab43a8f6ccc3df8f9327623"}, + {file = "rpds_py-0.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cff7351c251c7546407827b6a37bcef6416304fc54d12d44dbfecbb717064717"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:177914f81f66c86c012311f8c7f46887ec375cfcfd2a2f28233a3053ac93a569"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:448a66b8266de0b581246ca7cd6a73b8d98d15100fb7165974535fa3b577340e"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bbac1953c17252f9cc675bb19372444aadf0179b5df575ac4b56faaec9f6294"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dd9d9d9e898b9d30683bdd2b6c1849449158647d1049a125879cb397ee9cd12"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8c71ea77536149e36c4c784f6d420ffd20bea041e3ba21ed021cb40ce58e2c9"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16a472300bc6c83fe4c2072cc22b3972f90d718d56f241adabc7ae509f53f154"}, + {file = 
"rpds_py-0.10.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b9255e7165083de7c1d605e818025e8860636348f34a79d84ec533546064f07e"}, + {file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:53d7a3cd46cdc1689296348cb05ffd4f4280035770aee0c8ead3bbd4d6529acc"}, + {file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22da15b902f9f8e267020d1c8bcfc4831ca646fecb60254f7bc71763569f56b1"}, + {file = "rpds_py-0.10.3-cp39-none-win32.whl", hash = "sha256:850c272e0e0d1a5c5d73b1b7871b0a7c2446b304cec55ccdb3eaac0d792bb065"}, + {file = "rpds_py-0.10.3-cp39-none-win_amd64.whl", hash = "sha256:de61e424062173b4f70eec07e12469edde7e17fa180019a2a0d75c13a5c5dc57"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:af247fd4f12cca4129c1b82090244ea5a9d5bb089e9a82feb5a2f7c6a9fe181d"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3ad59efe24a4d54c2742929001f2d02803aafc15d6d781c21379e3f7f66ec842"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642ed0a209ced4be3a46f8cb094f2d76f1f479e2a1ceca6de6346a096cd3409d"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37d0c59548ae56fae01c14998918d04ee0d5d3277363c10208eef8c4e2b68ed6"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aad6ed9e70ddfb34d849b761fb243be58c735be6a9265b9060d6ddb77751e3e8"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f94fdd756ba1f79f988855d948ae0bad9ddf44df296770d9a58c774cfbcca72"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77076bdc8776a2b029e1e6ffbe6d7056e35f56f5e80d9dc0bad26ad4a024a762"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87d9b206b1bd7a0523375dc2020a6ce88bca5330682ae2fe25e86fd5d45cea9c"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:8efaeb08ede95066da3a3e3c420fcc0a21693fcd0c4396d0585b019613d28515"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a4d9bfda3f84fc563868fe25ca160c8ff0e69bc4443c5647f960d59400ce6557"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:d27aa6bbc1f33be920bb7adbb95581452cdf23005d5611b29a12bb6a3468cc95"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ed8313809571a5463fd7db43aaca68ecb43ca7a58f5b23b6e6c6c5d02bdc7882"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:e10e6a1ed2b8661201e79dff5531f8ad4cdd83548a0f81c95cf79b3184b20c33"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:015de2ce2af1586ff5dc873e804434185199a15f7d96920ce67e50604592cae9"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae87137951bb3dc08c7d8bfb8988d8c119f3230731b08a71146e84aaa919a7a9"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0bb4f48bd0dd18eebe826395e6a48b7331291078a879295bae4e5d053be50d4c"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09362f86ec201288d5687d1dc476b07bf39c08478cde837cb710b302864e7ec9"}, + {file = 
"rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821392559d37759caa67d622d0d2994c7a3f2fb29274948ac799d496d92bca73"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7170cbde4070dc3c77dec82abf86f3b210633d4f89550fa0ad2d4b549a05572a"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:5de11c041486681ce854c814844f4ce3282b6ea1656faae19208ebe09d31c5b8"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:4ed172d0c79f156c1b954e99c03bc2e3033c17efce8dd1a7c781bc4d5793dfac"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:11fdd1192240dda8d6c5d18a06146e9045cb7e3ba7c06de6973000ff035df7c6"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:f602881d80ee4228a2355c68da6b296a296cd22bbb91e5418d54577bbf17fa7c"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:691d50c99a937709ac4c4cd570d959a006bd6a6d970a484c84cc99543d4a5bbb"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24cd91a03543a0f8d09cb18d1cb27df80a84b5553d2bd94cba5979ef6af5c6e7"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fc2200e79d75b5238c8d69f6a30f8284290c777039d331e7340b6c17cad24a5a"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea65b59882d5fa8c74a23f8960db579e5e341534934f43f3b18ec1839b893e41"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:829e91f3a8574888b73e7a3feb3b1af698e717513597e23136ff4eba0bc8387a"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eab75a8569a095f2ad470b342f2751d9902f7944704f0571c8af46bede438475"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:061c3ff1f51ecec256e916cf71cc01f9975af8fb3af9b94d3c0cc8702cfea637"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:39d05e65f23a0fe897b6ac395f2a8d48c56ac0f583f5d663e0afec1da89b95da"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:4eca20917a06d2fca7628ef3c8b94a8c358f6b43f1a621c9815243462dcccf97"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e8d0f0eca087630d58b8c662085529781fd5dc80f0a54eda42d5c9029f812599"}, + {file = "rpds_py-0.10.3.tar.gz", hash = "sha256:fcc1ebb7561a3e24a6588f7c6ded15d80aec22c66a070c757559b57b17ffd1cb"}, +] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = true +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "s3fs" +version = "2023.9.1" +description = "Convenient Filesystem interface over S3" +optional = true +python-versions = ">= 3.8" +files = [ + {file = "s3fs-2023.9.1-py3-none-any.whl", hash = "sha256:3bd1f9f33e4ad090d150301c3b386061cb7085fc8bda3a9ec9198dccca765d6c"}, + {file = "s3fs-2023.9.1.tar.gz", hash = 
"sha256:42e1821ed94c1607c848853d1d715ebcd25c13380b6f510c2cb498c7e5b3e674"}, +] + +[package.dependencies] +aiobotocore = ">=2.5.4,<2.6.0" +aiohttp = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1" +fsspec = "2023.9.1" + +[package.extras] +awscli = ["aiobotocore[awscli] (>=2.5.4,<2.6.0)"] +boto3 = ["aiobotocore[boto3] (>=2.5.4,<2.6.0)"] + +[[package]] +name = "s3transfer" +version = "0.6.2" +description = "An Amazon S3 Transfer Manager" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "s3transfer-0.6.2-py3-none-any.whl", hash = "sha256:b014be3a8a2aab98cfe1abc7229cc5a9a0cf05eb9c1f2b86b230fd8df3f78084"}, + {file = "s3transfer-0.6.2.tar.gz", hash = "sha256:cab66d3380cca3e70939ef2255d01cd8aece6a4907a9528740f668c4b0611861"}, +] + +[package.dependencies] +botocore = ">=1.12.36,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] + +[[package]] +name = "setuptools" +version = "68.2.2" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-68.2.2-py3-none-any.whl", hash = "sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a"}, + {file = "setuptools-68.2.2.tar.gz", hash = "sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sortedcontainers" +version = "2.4.0" +description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" +optional = false +python-versions = "*" +files = [ + {file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"}, + {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.21" +description = "Database Abstraction Library" +optional = true +python-versions = ">=3.7" +files = [ + {file = "SQLAlchemy-2.0.21-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1e7dc99b23e33c71d720c4ae37ebb095bebebbd31a24b7d99dfc4753d2803ede"}, + {file = 
"SQLAlchemy-2.0.21-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7f0c4ee579acfe6c994637527c386d1c22eb60bc1c1d36d940d8477e482095d4"}, + {file = "SQLAlchemy-2.0.21-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f7d57a7e140efe69ce2d7b057c3f9a595f98d0bbdfc23fd055efdfbaa46e3a5"}, + {file = "SQLAlchemy-2.0.21-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca38746eac23dd7c20bec9278d2058c7ad662b2f1576e4c3dbfcd7c00cc48fa"}, + {file = "SQLAlchemy-2.0.21-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3cf229704074bce31f7f47d12883afee3b0a02bb233a0ba45ddbfe542939cca4"}, + {file = "SQLAlchemy-2.0.21-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fb87f763b5d04a82ae84ccff25554ffd903baafba6698e18ebaf32561f2fe4aa"}, + {file = "SQLAlchemy-2.0.21-cp310-cp310-win32.whl", hash = "sha256:89e274604abb1a7fd5c14867a412c9d49c08ccf6ce3e1e04fffc068b5b6499d4"}, + {file = "SQLAlchemy-2.0.21-cp310-cp310-win_amd64.whl", hash = "sha256:e36339a68126ffb708dc6d1948161cea2a9e85d7d7b0c54f6999853d70d44430"}, + {file = "SQLAlchemy-2.0.21-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bf8eebccc66829010f06fbd2b80095d7872991bfe8415098b9fe47deaaa58063"}, + {file = "SQLAlchemy-2.0.21-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b977bfce15afa53d9cf6a632482d7968477625f030d86a109f7bdfe8ce3c064a"}, + {file = "SQLAlchemy-2.0.21-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ff3dc2f60dbf82c9e599c2915db1526d65415be323464f84de8db3e361ba5b9"}, + {file = "SQLAlchemy-2.0.21-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44ac5c89b6896f4740e7091f4a0ff2e62881da80c239dd9408f84f75a293dae9"}, + {file = "SQLAlchemy-2.0.21-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:87bf91ebf15258c4701d71dcdd9c4ba39521fb6a37379ea68088ce8cd869b446"}, + {file = "SQLAlchemy-2.0.21-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b69f1f754d92eb1cc6b50938359dead36b96a1dcf11a8670bff65fd9b21a4b09"}, + {file = "SQLAlchemy-2.0.21-cp311-cp311-win32.whl", hash = "sha256:af520a730d523eab77d754f5cf44cc7dd7ad2d54907adeb3233177eeb22f271b"}, + {file = "SQLAlchemy-2.0.21-cp311-cp311-win_amd64.whl", hash = "sha256:141675dae56522126986fa4ca713739d00ed3a6f08f3c2eb92c39c6dfec463ce"}, + {file = "SQLAlchemy-2.0.21-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7614f1eab4336df7dd6bee05bc974f2b02c38d3d0c78060c5faa4cd1ca2af3b8"}, + {file = "SQLAlchemy-2.0.21-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d59cb9e20d79686aa473e0302e4a82882d7118744d30bb1dfb62d3c47141b3ec"}, + {file = "SQLAlchemy-2.0.21-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a95aa0672e3065d43c8aa80080cdd5cc40fe92dc873749e6c1cf23914c4b83af"}, + {file = "SQLAlchemy-2.0.21-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8c323813963b2503e54d0944813cd479c10c636e3ee223bcbd7bd478bf53c178"}, + {file = "SQLAlchemy-2.0.21-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:419b1276b55925b5ac9b4c7044e999f1787c69761a3c9756dec6e5c225ceca01"}, + {file = "SQLAlchemy-2.0.21-cp37-cp37m-win32.whl", hash = "sha256:4615623a490e46be85fbaa6335f35cf80e61df0783240afe7d4f544778c315a9"}, + {file = "SQLAlchemy-2.0.21-cp37-cp37m-win_amd64.whl", hash = "sha256:cca720d05389ab1a5877ff05af96551e58ba65e8dc65582d849ac83ddde3e231"}, + {file = "SQLAlchemy-2.0.21-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b4eae01faee9f2b17f08885e3f047153ae0416648f8e8c8bd9bc677c5ce64be9"}, + {file = 
"SQLAlchemy-2.0.21-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3eb7c03fe1cd3255811cd4e74db1ab8dca22074d50cd8937edf4ef62d758cdf4"}, + {file = "SQLAlchemy-2.0.21-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2d494b6a2a2d05fb99f01b84cc9af9f5f93bf3e1e5dbdafe4bed0c2823584c1"}, + {file = "SQLAlchemy-2.0.21-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b19ae41ef26c01a987e49e37c77b9ad060c59f94d3b3efdfdbf4f3daaca7b5fe"}, + {file = "SQLAlchemy-2.0.21-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:fc6b15465fabccc94bf7e38777d665b6a4f95efd1725049d6184b3a39fd54880"}, + {file = "SQLAlchemy-2.0.21-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:014794b60d2021cc8ae0f91d4d0331fe92691ae5467a00841f7130fe877b678e"}, + {file = "SQLAlchemy-2.0.21-cp38-cp38-win32.whl", hash = "sha256:0268256a34806e5d1c8f7ee93277d7ea8cc8ae391f487213139018b6805aeaf6"}, + {file = "SQLAlchemy-2.0.21-cp38-cp38-win_amd64.whl", hash = "sha256:73c079e21d10ff2be54a4699f55865d4b275fd6c8bd5d90c5b1ef78ae0197301"}, + {file = "SQLAlchemy-2.0.21-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:785e2f2c1cb50d0a44e2cdeea5fd36b5bf2d79c481c10f3a88a8be4cfa2c4615"}, + {file = "SQLAlchemy-2.0.21-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c111cd40910ffcb615b33605fc8f8e22146aeb7933d06569ac90f219818345ef"}, + {file = "SQLAlchemy-2.0.21-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9cba4e7369de663611ce7460a34be48e999e0bbb1feb9130070f0685e9a6b66"}, + {file = "SQLAlchemy-2.0.21-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50a69067af86ec7f11a8e50ba85544657b1477aabf64fa447fd3736b5a0a4f67"}, + {file = "SQLAlchemy-2.0.21-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ccb99c3138c9bde118b51a289d90096a3791658da9aea1754667302ed6564f6e"}, + {file = "SQLAlchemy-2.0.21-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:513fd5b6513d37e985eb5b7ed89da5fd9e72354e3523980ef00d439bc549c9e9"}, + {file = "SQLAlchemy-2.0.21-cp39-cp39-win32.whl", hash = "sha256:f9fefd6298433b6e9188252f3bff53b9ff0443c8fde27298b8a2b19f6617eeb9"}, + {file = "SQLAlchemy-2.0.21-cp39-cp39-win_amd64.whl", hash = "sha256:2e617727fe4091cedb3e4409b39368f424934c7faa78171749f704b49b4bb4ce"}, + {file = "SQLAlchemy-2.0.21-py3-none-any.whl", hash = "sha256:ea7da25ee458d8f404b93eb073116156fd7d8c2a776d8311534851f28277b4ce"}, + {file = "SQLAlchemy-2.0.21.tar.gz", hash = "sha256:05b971ab1ac2994a14c56b35eaaa91f86ba080e9ad481b20d99d77f381bb6258"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +typing-extensions = ">=4.2.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx-oracle (>=7)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = 
["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3-binary"] + +[[package]] +name = "strictyaml" +version = "1.7.3" +description = "Strict, typed YAML parser" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "strictyaml-1.7.3-py3-none-any.whl", hash = "sha256:fb5c8a4edb43bebb765959e420f9b3978d7f1af88c80606c03fb420888f5d1c7"}, + {file = "strictyaml-1.7.3.tar.gz", hash = "sha256:22f854a5fcab42b5ddba8030a0e4be51ca89af0267961c8d6cfa86395586c407"}, +] + +[package.dependencies] +python-dateutil = ">=2.6.0" + +[[package]] +name = "thrift" +version = "0.16.0" +description = "Python bindings for the Apache Thrift RPC system" +optional = true +python-versions = "*" +files = [ + {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"}, +] + +[package.dependencies] +six = ">=1.7.2" + +[package.extras] +all = ["tornado (>=4.0)", "twisted"] +tornado = ["tornado (>=4.0)"] +twisted = ["twisted"] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.11" +description = "Typing stubs for PyYAML" +optional = false +python-versions = "*" +files = [ + {file = "types-PyYAML-6.0.12.11.tar.gz", hash = "sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b"}, + {file = "types_PyYAML-6.0.12.11-py3-none-any.whl", hash = "sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d"}, +] + +[[package]] +name = "typing-extensions" +version = "4.7.1" +description = "Backported and Experimental Type Hints for Python 3.7+" +optional = false +python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, + {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, +] + +[[package]] +name = "tzdata" +version = "2023.3" +description = "Provider of IANA time zone data" +optional = true +python-versions = ">=2" +files = [ + {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, + {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, +] + +[[package]] +name = "urllib3" +version = "1.26.16" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "urllib3-1.26.16-py2.py3-none-any.whl", hash = "sha256:8d36afa7616d8ab714608411b4a3b13e58f463aee519024578e062e141dce20f"}, + {file = "urllib3-1.26.16.tar.gz", hash = "sha256:8f135f6502756bde6b2a9b28989df5fbe87c9970cecaa69041edcce7f0589b14"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "virtualenv" +version = "20.24.5" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.7" +files = [ + {file = "virtualenv-20.24.5-py3-none-any.whl", hash = "sha256:b80039f280f4919c77b30f1c23294ae357c4c8701042086e3fc005963e4e537b"}, + {file = "virtualenv-20.24.5.tar.gz", hash = "sha256:e8361967f6da6fbdf1426483bfe9fca8287c242ac0bc30429905721cefbff752"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<4" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] + +[[package]] +name = "werkzeug" +version = "2.3.7" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.8" +files = [ + {file = "werkzeug-2.3.7-py3-none-any.whl", hash = "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528"}, + {file = "werkzeug-2.3.7.tar.gz", hash = "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wrapt" +version = "1.15.0" +description = "Module for decorators, wrappers and monkey patching." 
+optional = true +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +files = [ + {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"}, + {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"}, + {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"}, + {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"}, + {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"}, + {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"}, + {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"}, + {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"}, + {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"}, + {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"}, + {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"}, + {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"}, + {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"}, + {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"}, + {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"}, + {file 
= "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"}, + {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"}, + {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"}, + {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"}, + {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"}, + {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"}, + {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"}, + {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"}, + {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", 
hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"}, + {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"}, + {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"}, + {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"}, + {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"}, +] + +[[package]] +name = "xmltodict" +version = "0.13.0" +description = "Makes working with XML feel like you are working with JSON" +optional = false +python-versions = ">=3.4" +files = [ + {file = "xmltodict-0.13.0-py2.py3-none-any.whl", hash = "sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852"}, + {file = "xmltodict-0.13.0.tar.gz", hash = "sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56"}, +] + +[[package]] +name = "yarl" +version = "1.9.2" +description = "Yet another URL library" +optional = true +python-versions = ">=3.7" +files = [ + {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"}, + {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"}, + {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"}, + {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = 
"sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"}, + {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"}, + {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"}, + {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"}, + {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"}, + {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"}, + {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"}, + {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"}, + {file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"}, + {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"}, + {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"}, + {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"}, + {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"}, + {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"}, + {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"}, + {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"}, + {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"}, + {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"}, + {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"}, + {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"}, + {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + +[[package]] +name = "zipp" +version = "3.17.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"}, + {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] + +[[package]] +name = "zstandard" +version = "0.21.0" +description = "Zstandard bindings for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "zstandard-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:649a67643257e3b2cff1c0a73130609679a5673bf389564bc6d4b164d822a7ce"}, + {file = "zstandard-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:144a4fe4be2e747bf9c646deab212666e39048faa4372abb6a250dab0f347a29"}, + {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b72060402524ab91e075881f6b6b3f37ab715663313030d0ce983da44960a86f"}, + {file 
= "zstandard-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8257752b97134477fb4e413529edaa04fc0457361d304c1319573de00ba796b1"}, + {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c053b7c4cbf71cc26808ed67ae955836232f7638444d709bfc302d3e499364fa"}, + {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2769730c13638e08b7a983b32cb67775650024632cd0476bf1ba0e6360f5ac7d"}, + {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7d3bc4de588b987f3934ca79140e226785d7b5e47e31756761e48644a45a6766"}, + {file = "zstandard-0.21.0-cp310-cp310-win32.whl", hash = "sha256:67829fdb82e7393ca68e543894cd0581a79243cc4ec74a836c305c70a5943f07"}, + {file = "zstandard-0.21.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6048a287f8d2d6e8bc67f6b42a766c61923641dd4022b7fd3f7439e17ba5a4d"}, + {file = "zstandard-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7f2afab2c727b6a3d466faee6974a7dad0d9991241c498e7317e5ccf53dbc766"}, + {file = "zstandard-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff0852da2abe86326b20abae912d0367878dd0854b8931897d44cfeb18985472"}, + {file = "zstandard-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d12fa383e315b62630bd407477d750ec96a0f438447d0e6e496ab67b8b451d39"}, + {file = "zstandard-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1b9703fe2e6b6811886c44052647df7c37478af1b4a1a9078585806f42e5b15"}, + {file = "zstandard-0.21.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df28aa5c241f59a7ab524f8ad8bb75d9a23f7ed9d501b0fed6d40ec3064784e8"}, + {file = "zstandard-0.21.0-cp311-cp311-win32.whl", hash = "sha256:0aad6090ac164a9d237d096c8af241b8dcd015524ac6dbec1330092dba151657"}, + {file = "zstandard-0.21.0-cp311-cp311-win_amd64.whl", hash = "sha256:48b6233b5c4cacb7afb0ee6b4f91820afbb6c0e3ae0fa10abbc20000acdf4f11"}, + {file = "zstandard-0.21.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e7d560ce14fd209db6adacce8908244503a009c6c39eee0c10f138996cd66d3e"}, + {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e6e131a4df2eb6f64961cea6f979cdff22d6e0d5516feb0d09492c8fd36f3bc"}, + {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1e0c62a67ff425927898cf43da2cf6b852289ebcc2054514ea9bf121bec10a5"}, + {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1545fb9cb93e043351d0cb2ee73fa0ab32e61298968667bb924aac166278c3fc"}, + {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe6c821eb6870f81d73bf10e5deed80edcac1e63fbc40610e61f340723fd5f7c"}, + {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ddb086ea3b915e50f6604be93f4f64f168d3fc3cef3585bb9a375d5834392d4f"}, + {file = "zstandard-0.21.0-cp37-cp37m-win32.whl", hash = "sha256:57ac078ad7333c9db7a74804684099c4c77f98971c151cee18d17a12649bc25c"}, + {file = "zstandard-0.21.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1243b01fb7926a5a0417120c57d4c28b25a0200284af0525fddba812d575f605"}, + 
{file = "zstandard-0.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ea68b1ba4f9678ac3d3e370d96442a6332d431e5050223626bdce748692226ea"}, + {file = "zstandard-0.21.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8070c1cdb4587a8aa038638acda3bd97c43c59e1e31705f2766d5576b329e97c"}, + {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4af612c96599b17e4930fe58bffd6514e6c25509d120f4eae6031b7595912f85"}, + {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff891e37b167bc477f35562cda1248acc115dbafbea4f3af54ec70821090965"}, + {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9fec02ce2b38e8b2e86079ff0b912445495e8ab0b137f9c0505f88ad0d61296"}, + {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0bdbe350691dec3078b187b8304e6a9c4d9db3eb2d50ab5b1d748533e746d099"}, + {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b69cccd06a4a0a1d9fb3ec9a97600055cf03030ed7048d4bcb88c574f7895773"}, + {file = "zstandard-0.21.0-cp38-cp38-win32.whl", hash = "sha256:9980489f066a391c5572bc7dc471e903fb134e0b0001ea9b1d3eff85af0a6f1b"}, + {file = "zstandard-0.21.0-cp38-cp38-win_amd64.whl", hash = "sha256:0e1e94a9d9e35dc04bf90055e914077c80b1e0c15454cc5419e82529d3e70728"}, + {file = "zstandard-0.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d2d61675b2a73edcef5e327e38eb62bdfc89009960f0e3991eae5cc3d54718de"}, + {file = "zstandard-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:25fbfef672ad798afab12e8fd204d122fca3bc8e2dcb0a2ba73bf0a0ac0f5f07"}, + {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62957069a7c2626ae80023998757e27bd28d933b165c487ab6f83ad3337f773d"}, + {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14e10ed461e4807471075d4b7a2af51f5234c8f1e2a0c1d37d5ca49aaaad49e8"}, + {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9cff89a036c639a6a9299bf19e16bfb9ac7def9a7634c52c257166db09d950e7"}, + {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52b2b5e3e7670bd25835e0e0730a236f2b0df87672d99d3bf4bf87248aa659fb"}, + {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b1367da0dde8ae5040ef0413fb57b5baeac39d8931c70536d5f013b11d3fc3a5"}, + {file = "zstandard-0.21.0-cp39-cp39-win32.whl", hash = "sha256:db62cbe7a965e68ad2217a056107cc43d41764c66c895be05cf9c8b19578ce9c"}, + {file = "zstandard-0.21.0-cp39-cp39-win_amd64.whl", hash = "sha256:a8d200617d5c876221304b0e3fe43307adde291b4a897e7b0617a61611dfff6a"}, + {file = "zstandard-0.21.0.tar.gz", hash = "sha256:f08e3a10d01a247877e4cb61a82a319ea746c356a3786558bed2481e6c405546"}, +] + +[package.dependencies] +cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\""} + +[package.extras] +cffi = ["cffi (>=1.11)"] + +[extras] +adlfs = ["adlfs"] +duckdb = ["duckdb", "pyarrow"] +dynamodb = ["boto3"] +gcsfs = ["gcsfs"] +glue = ["boto3", "mypy-boto3-glue"] +hive = ["thrift"] +pandas = ["pandas", "pyarrow"] +pyarrow = ["pyarrow"] +ray = ["pandas", "pyarrow", 
"ray"] +s3fs = ["s3fs"] +snappy = ["python-snappy"] +sql-postgres = ["psycopg2-binary", "sqlalchemy"] +zstandard = ["zstandard"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.8" +content-hash = "16c09897acd9967782da65d3c32730a6f5abe1ea047b23a359614a04bcc69300" diff --git a/pyiceberg/__init__.py b/pyiceberg/__init__.py new file mode 100644 index 0000000000..c95a617a26 --- /dev/null +++ b/pyiceberg/__init__.py @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +__version__ = "0.5.0" diff --git a/pyiceberg/avro/__init__.py b/pyiceberg/avro/__init__.py new file mode 100644 index 0000000000..75440db77c --- /dev/null +++ b/pyiceberg/avro/__init__.py @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +import struct + +STRUCT_BOOL = struct.Struct("?") +STRUCT_FLOAT = struct.Struct(" tuple[bytes, int]: + compressed_data = bz2.compress(data) + return compressed_data, len(compressed_data) + + @staticmethod + def decompress(data: bytes) -> bytes: + return bz2.decompress(data) + +except ImportError: + + class BZip2Codec(Codec): # type: ignore + @staticmethod + def compress(data: bytes) -> tuple[bytes, int]: + raise ImportError("Python bzip2 support not installed, please install the extension") + + @staticmethod + def decompress(data: bytes) -> bytes: + raise ImportError("Python bzip2 support not installed, please install the extension") diff --git a/pyiceberg/avro/codecs/codec.py b/pyiceberg/avro/codecs/codec.py new file mode 100644 index 0000000000..1c04f0db3e --- /dev/null +++ b/pyiceberg/avro/codecs/codec.py @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from __future__ import annotations + +from abc import ABC, abstractmethod + + +class Codec(ABC): + """Abstract base class for all Avro codec classes.""" + + @staticmethod + @abstractmethod + def compress(data: bytes) -> tuple[bytes, int]: + ... + + @staticmethod + @abstractmethod + def decompress(data: bytes) -> bytes: + ... diff --git a/pyiceberg/avro/codecs/deflate.py b/pyiceberg/avro/codecs/deflate.py new file mode 100644 index 0000000000..33fc11cd43 --- /dev/null +++ b/pyiceberg/avro/codecs/deflate.py @@ -0,0 +1,36 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from __future__ import annotations + +import zlib + +from pyiceberg.avro.codecs.codec import Codec + + +class DeflateCodec(Codec): + @staticmethod + def compress(data: bytes) -> tuple[bytes, int]: + # The first two characters and last character are zlib + # wrappers around deflate data. + compressed_data = zlib.compress(data)[2:-1] + return compressed_data, len(compressed_data) + + @staticmethod + def decompress(data: bytes) -> bytes: + # -15 is the log of the window size; negative indicates + # "raw" (no zlib headers) decompression. See zlib.h. + return zlib.decompress(data, -15) diff --git a/pyiceberg/avro/codecs/snappy_codec.py b/pyiceberg/avro/codecs/snappy_codec.py new file mode 100644 index 0000000000..2da8ed8f72 --- /dev/null +++ b/pyiceberg/avro/codecs/snappy_codec.py @@ -0,0 +1,69 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
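The Codec contract above returns both the compressed payload and its byte length, and DeflateCodec stores raw DEFLATE data: the two-byte zlib header and the checksum trailer are stripped on write, and wbits=-15 requests a raw (headerless) inflate on read. A minimal stdlib-only sketch of that round trip (the sample payload is illustrative):

    import zlib

    data = b"iceberg" * 100
    # Write side: drop the 2-byte zlib header and the trailing checksum byte,
    # keeping only raw DEFLATE data.
    compressed = zlib.compress(data)[2:-1]
    # Read side: -15 selects raw DEFLATE with no zlib wrapper; bytes left over
    # after the end of the stream are ignored.
    assert zlib.decompress(compressed, -15) == data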
+from __future__ import annotations + +import binascii +import struct + +from pyiceberg.avro.codecs.codec import Codec + +STRUCT_CRC32 = struct.Struct(">I") # big-endian unsigned int + +try: + import snappy + + class SnappyCodec(Codec): + @staticmethod + def _check_crc32(bytes_: bytes, checksum: bytes) -> None: + """Compute the CRC-32 of the bytes and compare it to a checksum. + + Args: + bytes_ (bytes): The bytes to check against `checksum` + checksum (bytes): Byte representation of a checksum + + Raises: + ValueError: If the computed CRC-32 does not match the checksum + """ + if binascii.crc32(bytes_) & 0xFFFFFFFF != STRUCT_CRC32.unpack(checksum)[0]: + raise ValueError("Checksum failure") + + @staticmethod + def compress(data: bytes) -> tuple[bytes, int]: + compressed_data = snappy.compress(data) + # A 4-byte, big-endian CRC32 checksum of the uncompressed data + compressed_data += STRUCT_CRC32.pack(binascii.crc32(data) & 0xFFFFFFFF) + return compressed_data, len(compressed_data) + + @staticmethod + def decompress(data: bytes) -> bytes: + # Compressed data ends with a 4-byte CRC32 checksum, so capture the + # trailer before stripping it off + checksum = data[-4:] + uncompressed = snappy.decompress(data[:-4]) + SnappyCodec._check_crc32(uncompressed, checksum) + return uncompressed + +except ImportError: + + class SnappyCodec(Codec): # type: ignore + @staticmethod + def compress(data: bytes) -> tuple[bytes, int]: + raise ImportError("Snappy support not installed, please install using `pip install pyiceberg[snappy]`") + + @staticmethod + def decompress(data: bytes) -> bytes: + raise ImportError("Snappy support not installed, please install using `pip install pyiceberg[snappy]`") diff --git a/pyiceberg/avro/codecs/zstandard_codec.py b/pyiceberg/avro/codecs/zstandard_codec.py new file mode 100644 index 0000000000..a048f68490 --- /dev/null +++ b/pyiceberg/avro/codecs/zstandard_codec.py @@ -0,0 +1,53 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.
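SnappyCodec's framing is worth spelling out: the CRC-32 trailer is computed over the *uncompressed* bytes and appended after the snappy body, so a reader has to peel the trailer off before decompressing and verify it afterwards (which is why `decompress` above captures the checksum before truncating). A small round-trip sketch, assuming `python-snappy` (the `pyiceberg[snappy]` extra) is installed; the payload is made up:

    import binascii
    import struct

    import snappy

    STRUCT_CRC32 = struct.Struct(">I")  # 4-byte, big-endian unsigned int

    raw = b"some manifest bytes"
    # Write side: snappy body followed by the CRC-32 of the original data.
    block = snappy.compress(raw) + STRUCT_CRC32.pack(binascii.crc32(raw) & 0xFFFFFFFF)

    # Read side: split the 4-byte trailer off first, then check it against
    # the decompressed output.
    checksum = block[-4:]
    restored = snappy.decompress(block[:-4])
    assert binascii.crc32(restored) & 0xFFFFFFFF == STRUCT_CRC32.unpack(checksum)[0]
    assert restored == raw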
+from __future__ import annotations + +from io import BytesIO + +from pyiceberg.avro.codecs.codec import Codec + +try: + from zstandard import ZstdCompressor, ZstdDecompressor + + class ZStandardCodec(Codec): + @staticmethod + def compress(data: bytes) -> tuple[bytes, int]: + compressed_data = ZstdCompressor().compress(data) + return compressed_data, len(compressed_data) + + @staticmethod + def decompress(data: bytes) -> bytes: + uncompressed = bytearray() + dctx = ZstdDecompressor() + with dctx.stream_reader(BytesIO(data)) as reader: + while True: + chunk = reader.read(16384) + if not chunk: + break + uncompressed.extend(chunk) + return uncompressed + +except ImportError: + + class ZStandardCodec(Codec): # type: ignore + @staticmethod + def compress(data: bytes) -> tuple[bytes, int]: + raise ImportError("Zstandard support not installed, please install using `pip install pyiceberg[zstandard]`") + + @staticmethod + def decompress(data: bytes) -> bytes: + raise ImportError("Zstandard support not installed, please install using `pip install pyiceberg[zstandard]`") diff --git a/pyiceberg/avro/decoder.py b/pyiceberg/avro/decoder.py new file mode 100644 index 0000000000..ab7813670f --- /dev/null +++ b/pyiceberg/avro/decoder.py @@ -0,0 +1,186 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +import io +from abc import ABC, abstractmethod +from io import SEEK_CUR +from typing import ( + Dict, + List, + Tuple, + Union, + cast, +) + +from pyiceberg.avro import STRUCT_DOUBLE, STRUCT_FLOAT +from pyiceberg.io import InputStream + + +class BinaryDecoder(ABC): + """Decodes bytes into Python physical primitives.""" + + @abstractmethod + def tell(self) -> int: + """Return the current position.""" + + @abstractmethod + def read(self, n: int) -> bytes: + """Read n bytes.""" + + @abstractmethod + def skip(self, n: int) -> None: + """Skip n bytes.""" + + def read_boolean(self) -> bool: + """Read a value from the stream as a boolean. + + A boolean is written as a single byte + whose value is either 0 (false) or 1 (true). + """ + return ord(self.read(1)) == 1 + + def read_int(self) -> int: + """Read an int/long value. + + int/long values are written using variable-length, zigzag coding. 
+ """ + b = ord(self.read(1)) + n = b & 0x7F + shift = 7 + while (b & 0x80) != 0: + b = ord(self.read(1)) + n |= (b & 0x7F) << shift + shift += 7 + datum = (n >> 1) ^ -(n & 1) + return datum + + def read_ints(self, n: int) -> Tuple[int, ...]: + """Read a list of integers.""" + return tuple(self.read_int() for _ in range(n)) + + def read_int_bytes_dict(self, n: int, dest: Dict[int, bytes]) -> None: + """Read a dictionary of integers for keys and bytes for values into a destination dictionary.""" + for _ in range(n): + k = self.read_int() + v = self.read_bytes() + dest[k] = v + + def read_float(self) -> float: + """Read a value from the stream as a float. + + A float is written as 4 bytes. + The float is converted into a 32-bit integer using a method equivalent to + Java's floatToIntBits and then encoded in little-endian format. + """ + return float(cast(Tuple[float, ...], STRUCT_FLOAT.unpack(self.read(4)))[0]) + + def read_double(self) -> float: + """Read a value from the stream as a double. + + A double is written as 8 bytes. + The double is converted into a 64-bit integer using a method equivalent to + Java's doubleToLongBits and then encoded in little-endian format. + """ + return float(cast(Tuple[float, ...], STRUCT_DOUBLE.unpack(self.read(8)))[0]) + + def read_bytes(self) -> bytes: + """Bytes are encoded as a long followed by that many bytes of data.""" + num_bytes = self.read_int() + return self.read(num_bytes) if num_bytes > 0 else b"" + + def read_utf8(self) -> str: + """Read an utf-8 encoded string from the stream. + + A string is encoded as a long followed by + that many bytes of UTF-8 encoded character data. + """ + return self.read_bytes().decode("utf-8") + + def skip_boolean(self) -> None: + self.skip(1) + + def skip_int(self) -> None: + b = ord(self.read(1)) + while (b & 0x80) != 0: + b = ord(self.read(1)) + + def skip_float(self) -> None: + self.skip(4) + + def skip_double(self) -> None: + self.skip(8) + + def skip_bytes(self) -> None: + self.skip(self.read_int()) + + def skip_utf8(self) -> None: + self.skip_bytes() + + +class StreamingBinaryDecoder(BinaryDecoder): + """Decodes bytes into Python physical primitives.""" + + __slots__ = "_input_stream" + _input_stream: InputStream + + def __init__(self, input_stream: Union[bytes, InputStream]) -> None: + """Reader is a Python object on which we can call read, seek, and tell.""" + if isinstance(input_stream, bytes): + # In the case of bytes, we wrap it into a BytesIO to make it a stream + self._input_stream = io.BytesIO(input_stream) + else: + self._input_stream = input_stream + + def tell(self) -> int: + """Return the current stream position.""" + return self._input_stream.tell() + + def read(self, n: int) -> bytes: + """Read n bytes.""" + if n < 0: + raise ValueError(f"Requested {n} bytes to read, expected positive integer.") + data: List[bytes] = [] + + n_remaining = n + while n_remaining > 0: + data_read = self._input_stream.read(n_remaining) + read_len = len(data_read) + if read_len == n: + # If we read everything, we return directly + # otherwise we'll continue to fetch the rest + return data_read + elif read_len <= 0: + raise EOFError(f"EOF: read {read_len} bytes") + data.append(data_read) + n_remaining -= read_len + + return b"".join(data) + + def skip(self, n: int) -> None: + self._input_stream.seek(n, SEEK_CUR) + + +def new_decoder(b: bytes) -> BinaryDecoder: + try: + from pyiceberg.avro.decoder_fast import CythonBinaryDecoder + + return CythonBinaryDecoder(b) + except ModuleNotFoundError: + import warnings + + 
warnings.warn("Falling back to pure Python Avro decoder, missing Cython implementation") + + return StreamingBinaryDecoder(b) diff --git a/pyiceberg/avro/decoder_basic.c b/pyiceberg/avro/decoder_basic.c new file mode 100644 index 0000000000..c3954f330d --- /dev/null +++ b/pyiceberg/avro/decoder_basic.c @@ -0,0 +1,65 @@ +/* + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. +*/ + +#include <stdint.h> + +/* + Decode an array of zig-zag encoded integers from a buffer. + + The buffer is advanced to the end of the integers. + `count` is the number of integers to decode. + `result` is where the decoded integers are stored. + + The result is guaranteed to be 64 bits wide. + +*/ +static inline void decode_zigzag_ints(const unsigned char **buffer, const uint64_t count, uint64_t *result) { + uint64_t current_index; + const unsigned char *current_position = *buffer; + uint64_t temp; + // The largest shift will always be < 64 + unsigned char shift; + + for (current_index = 0; current_index < count; current_index++) { + shift = 7; + temp = *current_position & 0x7F; + while(*current_position & 0x80) { + current_position += 1; + temp |= (uint64_t)(*current_position & 0x7F) << shift; + shift += 7; + } + result[current_index] = (temp >> 1) ^ (~(temp & 1) + 1); + current_position += 1; + } + *buffer = current_position; +} + + + +/* + Skip a zig-zag encoded integer in a buffer. + + The buffer is advanced to the end of the integer. +*/ +static inline void skip_zigzag_int(const unsigned char **buffer) { + while(**buffer & 0x80) { + *buffer += 1; + } + *buffer += 1; +} diff --git a/pyiceberg/avro/decoder_fast.pyi b/pyiceberg/avro/decoder_fast.pyi new file mode 100644 index 0000000000..cf45ce5066 --- /dev/null +++ b/pyiceberg/avro/decoder_fast.pyi @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.
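Both `read_int` in decoder.py and the C helper above implement the same Avro varint scheme: zigzag-map the signed value so small magnitudes get small codes, then emit 7 bits per byte with the high bit as a continuation flag. A pure-Python sketch of both directions for 64-bit values (the test values are examples):

    def zigzag_encode(value: int) -> bytes:
        # Zigzag interleaves signed values: 0, -1, 1, -2, 2 ... -> 0, 1, 2, 3, 4 ...
        datum = (value << 1) ^ (value >> 63)
        out = bytearray()
        while (datum & ~0x7F) != 0:
            out.append((datum & 0x7F) | 0x80)  # low 7 bits, continuation bit set
            datum >>= 7
        out.append(datum)
        return bytes(out)

    def zigzag_decode(buf: bytes) -> int:
        n = shift = 0
        for b in buf:
            n |= (b & 0x7F) << shift
            shift += 7
            if not b & 0x80:  # final byte has no continuation bit
                break
        return (n >> 1) ^ -(n & 1)

    assert [zigzag_encode(v) for v in (0, -1, 1, -2)] == [b"\x00", b"\x01", b"\x02", b"\x03"]
    assert zigzag_decode(zigzag_encode(-123456789)) == -123456789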
+ +from pyiceberg.avro.decoder import BinaryDecoder + +class CythonBinaryDecoder(BinaryDecoder): + def __init__(self, input_contents: bytes) -> None: + pass + def tell(self) -> int: + pass + def read(self, n: int) -> bytes: + pass + def read_boolean(self) -> bool: + pass + def read_int(self) -> int: + pass + def read_ints(self, count: int) -> tuple[int, ...]: + pass + def read_int_bytes_dict(self, count: int, dest: dict[int, bytes]) -> None: + pass + def read_bytes(self) -> bytes: + pass + def read_float(self) -> float: + pass + def read_double(self) -> float: + pass + def read_utf8(self) -> str: + pass + def skip(self, n: int) -> None: + pass + def skip_int(self) -> None: + pass + def skip_boolean(self) -> None: + pass + def skip_float(self) -> None: + pass + def skip_double(self) -> None: + pass + def skip_bytes(self) -> None: + pass + def skip_utf8(self) -> None: + pass diff --git a/pyiceberg/avro/decoder_fast.pyx b/pyiceberg/avro/decoder_fast.pyx new file mode 100644 index 0000000000..182fd0e92e --- /dev/null +++ b/pyiceberg/avro/decoder_fast.pyx @@ -0,0 +1,182 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +import cython +from cython.cimports.cpython import array +from pyiceberg.avro import STRUCT_DOUBLE, STRUCT_FLOAT +from cpython.mem cimport PyMem_Malloc, PyMem_Realloc, PyMem_Free +from libc.string cimport memcpy +from libc.stdint cimport uint64_t, int64_t + +import array + + +cdef extern from "decoder_basic.c": + void decode_zigzag_ints(const unsigned char **buffer, const uint64_t count, uint64_t *result); + void skip_zigzag_int(const unsigned char **buffer); + +unsigned_long_long_array_template = cython.declare(array.array, array.array('Q', [])) + +@cython.final +cdef class CythonBinaryDecoder: + """Implement a BinaryDecoder that reads from an in-memory buffer. + + """ + + # This the data that is duplicated when the decoder is created. + cdef unsigned char *_data + + # This is the current pointer to the buffer. + cdef const unsigned char *_current + + # This is the address after the data buffer + cdef const unsigned char *_end + + # This is the size of the buffer of the data being parsed. + cdef uint64_t _size + + def __cinit__(self, input_contents: bytes) -> None: + self._size = len(input_contents) + + # Make a copy of the data so the data can be iterated. 
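+ # Without the copy, `_current` would point into the buffer owned by the
+ # `input_contents` bytes object, which the caller may drop (and Python
+ # reclaim) while the decoder is still in use.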
+ self._data = PyMem_Malloc(self._size * sizeof(char)) + if not self._data: + raise MemoryError() + cdef const unsigned char *input_as_array = input_contents + memcpy(self._data, input_as_array, self._size) + self._end = self._data + self._size + self._current = self._data + + def __dealloc__(self): + PyMem_Free(self._data) + + cpdef unsigned int tell(self): + """Return the current stream position.""" + return self._current - self._data + + cpdef bytes read(self, n: int): + """Read n bytes.""" + if n < 0: + raise ValueError(f"Requested {n} bytes to read, expected positive integer.") + cdef const unsigned char *r = self._current + self._current += n + return r[0:n] + + def read_boolean(self) -> bool: + """Reads a value from the stream as a boolean. + + A boolean is written as a single byte + whose value is either 0 (false) or 1 (true). + """ + self._current += 1; + return self._current[-1] != 0 + + cpdef inline int64_t read_int(self): + """Reads a value from the stream as an integer. + + int/long values are written using variable-length, zigzag coding. + """ + cdef uint64_t result; + if self._current >= self._end: + raise EOFError(f"EOF: read 1 bytes") + decode_zigzag_ints(&self._current, 1, &result) + return result + + def read_ints(self, count: int) -> array.array[int]: + """Reads a list of integers.""" + newarray = array.clone(unsigned_long_long_array_template, count, zero=False) + if self._current >= self._end: + raise EOFError(f"EOF: read 1 bytes") + decode_zigzag_ints(&self._current, count, newarray.data.as_ulonglongs) + return newarray + + cpdef void read_int_bytes_dict(self, count: int, dest: Dict[int, bytes]): + """Reads a dictionary of integers for keys and bytes for values into a destination dict.""" + cdef uint64_t result[2]; + if self._current >= self._end: + raise EOFError(f"EOF: read 1 bytes") + + for _ in range(count): + decode_zigzag_ints(&self._current, 2, &result) + if result[1] <= 0: + dest[result[0]] = b"" + else: + dest[result[0]] = self._current[0:result[1]] + self._current += result[1] + + cpdef inline bytes read_bytes(self): + """Bytes are encoded as a long followed by that many bytes of data.""" + cdef uint64_t length; + if self._current >= self._end: + raise EOFError(f"EOF: read 1 bytes") + + decode_zigzag_ints(&self._current, 1, &length) + + if length <= 0: + return b"" + cdef const unsigned char *r = self._current + self._current += length + return r[0:length] + + cpdef float read_float(self): + """Reads a value from the stream as a float. + + A float is written as 4 bytes. + The float is converted into a 32-bit integer using a method equivalent to + Java's floatToIntBits and then encoded in little-endian format. + """ + return float(STRUCT_FLOAT.unpack(self.read(4))[0]) + + cpdef float read_double(self): + """Reads a value from the stream as a double. + + A double is written as 8 bytes. + The double is converted into a 64-bit integer using a method equivalent to + Java's doubleToLongBits and then encoded in little-endian format. + """ + return float(STRUCT_DOUBLE.unpack(self.read(8))[0]) + + cpdef str read_utf8(self): + """Reads a utf-8 encoded string from the stream. + + A string is encoded as a long followed by + that many bytes of UTF-8 encoded character data. 
+ """ + return self.read_bytes().decode("utf-8") + + def skip_int(self) -> None: + skip_zigzag_int(&self._current) + return + + def skip(self, n: int) -> None: + self._current += n + + def skip_boolean(self) -> None: + self._current += 1 + + def skip_float(self) -> None: + self._current += 4 + + def skip_double(self) -> None: + self._current += 8 + + def skip_bytes(self) -> None: + cdef uint64_t result; + decode_zigzag_ints(&self._current, 1, &result) + self._current += result + + def skip_utf8(self) -> None: + self.skip_bytes() diff --git a/pyiceberg/avro/encoder.py b/pyiceberg/avro/encoder.py new file mode 100644 index 0000000000..238d5d683a --- /dev/null +++ b/pyiceberg/avro/encoder.py @@ -0,0 +1,75 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from uuid import UUID + +from pyiceberg.avro import STRUCT_DOUBLE, STRUCT_FLOAT +from pyiceberg.io import OutputStream + + +class BinaryEncoder: + """Encodes Python physical types into bytes.""" + + _output_stream: OutputStream + + def __init__(self, output_stream: OutputStream) -> None: + self._output_stream = output_stream + + def write(self, b: bytes) -> None: + self._output_stream.write(b) + + def write_boolean(self, boolean: bool) -> None: + """Write a boolean as a single byte whose value is either 0 (false) or 1 (true). + + Args: + boolean: The boolean to write. + """ + self.write(bytearray([bool(boolean)])) + + def write_int(self, integer: int) -> None: + """Integer and long values are written using variable-length zig-zag coding.""" + datum = (integer << 1) ^ (integer >> 63) + while (datum & ~0x7F) != 0: + self.write(bytearray([(datum & 0x7F) | 0x80])) + datum >>= 7 + self.write(bytearray([datum])) + + def write_float(self, f: float) -> None: + """Write a float as 4 bytes.""" + self.write(STRUCT_FLOAT.pack(f)) + + def write_double(self, f: float) -> None: + """Write a double as 8 bytes.""" + self.write(STRUCT_DOUBLE.pack(f)) + + def write_bytes(self, b: bytes) -> None: + """Bytes are encoded as a long followed by that many bytes of data.""" + self.write_int(len(b)) + self.write(b) + + def write_utf8(self, s: str) -> None: + """Encode a string as a long followed by that many bytes of UTF-8 encoded character data.""" + self.write_bytes(s.encode("utf-8")) + + def write_uuid(self, uuid: UUID) -> None: + """Write UUID as a fixed[16]. + + The uuid logical type represents a random generated universally unique identifier (UUID). + An uuid logical type annotates an Avro string. The string has to conform with RFC-4122. 
+ """ + if len(uuid.bytes) != 16: + raise ValueError(f"Expected UUID to have 16 bytes, got: len({uuid.bytes!r})") + return self.write(uuid.bytes) diff --git a/pyiceberg/avro/file.py b/pyiceberg/avro/file.py new file mode 100644 index 0000000000..dc843f6dc0 --- /dev/null +++ b/pyiceberg/avro/file.py @@ -0,0 +1,276 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=W0621 +"""Avro reader for reading Avro files.""" +from __future__ import annotations + +import io +import json +import os +from dataclasses import dataclass +from enum import Enum +from types import TracebackType +from typing import ( + Callable, + Dict, + Generic, + List, + Optional, + Type, + TypeVar, +) + +from pyiceberg.avro.codecs import KNOWN_CODECS, Codec +from pyiceberg.avro.decoder import BinaryDecoder, new_decoder +from pyiceberg.avro.encoder import BinaryEncoder +from pyiceberg.avro.reader import Reader +from pyiceberg.avro.resolver import construct_reader, construct_writer, resolve +from pyiceberg.avro.writer import Writer +from pyiceberg.io import InputFile, OutputFile, OutputStream +from pyiceberg.schema import Schema +from pyiceberg.typedef import EMPTY_DICT, Record, StructProtocol +from pyiceberg.types import ( + FixedType, + MapType, + NestedField, + StringType, + StructType, +) +from pyiceberg.utils.schema_conversion import AvroSchemaConversion + +VERSION = 1 +MAGIC = bytes(b"Obj" + bytearray([VERSION])) +MAGIC_SIZE = len(MAGIC) +SYNC_SIZE = 16 +META_SCHEMA = StructType( + NestedField(name="magic", field_id=100, field_type=FixedType(length=MAGIC_SIZE), required=True), + NestedField( + field_id=200, + name="meta", + field_type=MapType(key_id=201, key_type=StringType(), value_id=202, value_type=StringType(), value_required=True), + required=True, + ), + NestedField(field_id=300, name="sync", field_type=FixedType(length=SYNC_SIZE), required=True), +) + +_CODEC_KEY = "avro.codec" +_SCHEMA_KEY = "avro.schema" + + +class AvroFileHeader(Record): + __slots__ = ("magic", "meta", "sync") + magic: bytes + meta: Dict[str, str] + sync: bytes + + def compression_codec(self) -> Optional[Type[Codec]]: + """Get the file's compression codec algorithm from the file's metadata. + + In the case of a null codec, we return a None indicating that we + don't need to compress/decompress. 
+ """ + codec_name = self.meta.get(_CODEC_KEY, "null") + if codec_name not in KNOWN_CODECS: + raise ValueError(f"Unsupported codec: {codec_name}") + + return KNOWN_CODECS[codec_name] + + def get_schema(self) -> Schema: + if _SCHEMA_KEY in self.meta: + avro_schema_string = self.meta[_SCHEMA_KEY] + avro_schema = json.loads(avro_schema_string) + return AvroSchemaConversion().avro_to_iceberg(avro_schema) + else: + raise ValueError("No schema found in Avro file headers") + + +D = TypeVar("D", bound=StructProtocol) + + +@dataclass +class Block(Generic[D]): + reader: Reader + block_records: int + block_decoder: BinaryDecoder + position: int = 0 + + def __iter__(self) -> Block[D]: + """Return an iterator for the Block class.""" + return self + + def has_next(self) -> bool: + return self.position < self.block_records + + def __next__(self) -> D: + """Return the next item when iterating over the Block class.""" + if self.has_next(): + self.position += 1 + return self.reader.read(self.block_decoder) + raise StopIteration + + +class AvroFile(Generic[D]): + __slots__ = ( + "input_file", + "read_schema", + "read_types", + "read_enums", + "header", + "schema", + "reader", + "decoder", + "block", + ) + input_file: InputFile + read_schema: Optional[Schema] + read_types: Dict[int, Callable[..., StructProtocol]] + read_enums: Dict[int, Callable[..., Enum]] + header: AvroFileHeader + schema: Schema + reader: Reader + + decoder: BinaryDecoder + block: Optional[Block[D]] + + def __init__( + self, + input_file: InputFile, + read_schema: Optional[Schema] = None, + read_types: Dict[int, Callable[..., StructProtocol]] = EMPTY_DICT, + read_enums: Dict[int, Callable[..., Enum]] = EMPTY_DICT, + ) -> None: + self.input_file = input_file + self.read_schema = read_schema + self.read_types = read_types + self.read_enums = read_enums + self.block = None + + def __enter__(self) -> AvroFile[D]: + """Generate a reader tree for the payload within an avro file. + + Return: + A generator returning the AvroStructs. 
+ """ + with self.input_file.open() as f: + self.decoder = new_decoder(f.read()) + self.header = self._read_header() + self.schema = self.header.get_schema() + if not self.read_schema: + self.read_schema = self.schema + + self.reader = resolve(self.schema, self.read_schema, self.read_types, self.read_enums) + + return self + + def __exit__( + self, exctype: Optional[Type[BaseException]], excinst: Optional[BaseException], exctb: Optional[TracebackType] + ) -> None: + """Perform cleanup when exiting the scope of a 'with' statement.""" + + def __iter__(self) -> AvroFile[D]: + """Return an iterator for the AvroFile class.""" + return self + + def _read_block(self) -> int: + # If there is already a block, we'll have the sync bytes + if self.block: + sync_marker = self.decoder.read(SYNC_SIZE) + if sync_marker != self.header.sync: + raise ValueError(f"Expected sync bytes {self.header.sync!r}, but got {sync_marker!r}") + block_records = self.decoder.read_int() + + block_bytes_len = self.decoder.read_int() + block_bytes = self.decoder.read(block_bytes_len) + if codec := self.header.compression_codec(): + block_bytes = codec.decompress(block_bytes) + + self.block = Block(reader=self.reader, block_records=block_records, block_decoder=new_decoder(block_bytes)) + return block_records + + def __next__(self) -> D: + """Return the next item when iterating over the AvroFile class.""" + if self.block and self.block.has_next(): + return next(self.block) + + try: + new_block = self._read_block() + except EOFError as exc: + raise StopIteration from exc + + if new_block > 0: + return self.__next__() + raise StopIteration + + def _read_header(self) -> AvroFileHeader: + return construct_reader(META_SCHEMA, {-1: AvroFileHeader}).read(self.decoder) + + +class AvroOutputFile(Generic[D]): + output_file: OutputFile + output_stream: OutputStream + schema: Schema + schema_name: str + encoder: BinaryEncoder + sync_bytes: bytes + writer: Writer + + def __init__(self, output_file: OutputFile, schema: Schema, schema_name: str, metadata: Dict[str, str] = EMPTY_DICT) -> None: + self.output_file = output_file + self.schema = schema + self.schema_name = schema_name + self.sync_bytes = os.urandom(SYNC_SIZE) + self.writer = construct_writer(self.schema) + self.metadata = metadata + + def __enter__(self) -> AvroOutputFile[D]: + """ + Open the file and writes the header. 
+ + Returns: + The file object to write records to + """ + self.output_stream = self.output_file.create(overwrite=True) + self.encoder = BinaryEncoder(self.output_stream) + + self._write_header() + self.writer = construct_writer(self.schema) + + return self + + def __exit__( + self, exctype: Optional[Type[BaseException]], excinst: Optional[BaseException], exctb: Optional[TracebackType] + ) -> None: + """Perform cleanup when exiting the scope of a 'with' statement.""" + self.output_stream.close() + + def _write_header(self) -> None: + json_schema = json.dumps(AvroSchemaConversion().iceberg_to_avro(self.schema, schema_name=self.schema_name)) + meta = {**self.metadata, _SCHEMA_KEY: json_schema, _CODEC_KEY: "null"} + header = AvroFileHeader(magic=MAGIC, meta=meta, sync=self.sync_bytes) + construct_writer(META_SCHEMA).write(self.encoder, header) + + def write_block(self, objects: List[D]) -> None: + in_memory = io.BytesIO() + block_content_encoder = BinaryEncoder(output_stream=in_memory) + for obj in objects: + self.writer.write(block_content_encoder, obj) + block_content = in_memory.getvalue() + + self.encoder.write_int(len(objects)) + self.encoder.write_int(len(block_content)) + self.encoder.write(block_content) + self.encoder.write(self.sync_bytes) diff --git a/pyiceberg/avro/reader.py b/pyiceberg/avro/reader.py new file mode 100644 index 0000000000..2b87e4b06f --- /dev/null +++ b/pyiceberg/avro/reader.py @@ -0,0 +1,492 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Classes for building the Reader tree. + +Constructing a reader tree from the schema makes it easy +to decouple the reader implementation from the schema. + +The reader tree can be changed in such a way that the +read schema is different, while respecting the read schema. +""" +from __future__ import annotations + +from abc import abstractmethod +from dataclasses import dataclass +from dataclasses import field as dataclassfield +from decimal import Decimal +from typing import ( + Any, + Callable, + List, + Mapping, + Optional, + Tuple, +) +from uuid import UUID + +from pyiceberg.avro.decoder import BinaryDecoder +from pyiceberg.typedef import StructProtocol +from pyiceberg.types import StructType +from pyiceberg.utils.decimal import bytes_to_decimal, decimal_required_bytes +from pyiceberg.utils.lazydict import LazyDict +from pyiceberg.utils.singleton import Singleton + + +def _skip_map_array(decoder: BinaryDecoder, skip_entry: Callable[[], None]) -> None: + """Skips over an array or map. + + Both the array and map are encoded similar, and we can re-use + the logic of skipping in an efficient way. + + From the Avro spec: + + Maps (and arrays) are encoded as a series of blocks. 
Each block consists of a long count value, followed by that many key/value pairs in the case of a map, + and followed by that many array items in the case of an array. A block with count zero indicates the + end of the map. Each item is encoded per the map's value schema. + + If a block's count is negative, its absolute value is used, and the count is followed immediately by a + long block size indicating the number of bytes in the block. This block size permits fast skipping + through data, e.g., when projecting a record to a subset of its fields. + + Args: + decoder: + The decoder that reads the types from the underlying data. + skip_entry: + Function to skip over the underlying data: the element in the case of an array, and the + key/value in the case of a map. + """ + block_count = decoder.read_int() + while block_count != 0: + if block_count < 0: + # The length in bytes is encoded, so we can skip over it right away + block_size = decoder.read_int() + decoder.skip(block_size) + else: + for _ in range(block_count): + skip_entry() + block_count = decoder.read_int() + + +class Reader(Singleton): + @abstractmethod + def read(self, decoder: BinaryDecoder) -> Any: + ... + + @abstractmethod + def skip(self, decoder: BinaryDecoder) -> None: + ... + + def __repr__(self) -> str: + """Return the string representation of the Reader class.""" + return f"{self.__class__.__name__}()" + + +class NoneReader(Reader): + def read(self, _: BinaryDecoder) -> None: + return None + + def skip(self, decoder: BinaryDecoder) -> None: + return None + + +class DefaultReader(Reader): + __slots__ = ("default_value",) + default_value: Any + + def __init__(self, default_value: Any) -> None: + self.default_value = default_value + + def read(self, _: BinaryDecoder) -> Any: + return self.default_value + + def skip(self, decoder: BinaryDecoder) -> None: + pass + + +class BooleanReader(Reader): + def read(self, decoder: BinaryDecoder) -> bool: + return decoder.read_boolean() + + def skip(self, decoder: BinaryDecoder) -> None: + decoder.skip_boolean() + + +class IntegerReader(Reader): + """Longs and ints are encoded the same way, and there is no long in Python.""" + + def read(self, decoder: BinaryDecoder) -> int: + return decoder.read_int() + + def skip(self, decoder: BinaryDecoder) -> None: + decoder.skip_int() + + +class FloatReader(Reader): + def read(self, decoder: BinaryDecoder) -> float: + return decoder.read_float() + + def skip(self, decoder: BinaryDecoder) -> None: + decoder.skip_float() + + +class DoubleReader(Reader): + def read(self, decoder: BinaryDecoder) -> float: + return decoder.read_double() + + def skip(self, decoder: BinaryDecoder) -> None: + decoder.skip_double() + + +class DateReader(IntegerReader): + """Reads a day granularity date from the stream. + + The number of days from 1 January 1970. + """ + + +class TimeReader(IntegerReader): + """Reads a microsecond granularity time from the stream. + + Long is decoded as an integer which represents + the number of microseconds past midnight. + """ + + +class TimestampReader(IntegerReader): + """Reads a microsecond granularity timestamp from the stream. + + Long is decoded as a Python integer which represents + the number of microseconds from the unix epoch, 1 January 1970. + """ + + +class TimestamptzReader(IntegerReader): + """Reads a microsecond granularity timestamptz from the stream. + + Long is decoded as a Python integer which represents + the number of microseconds from the unix epoch, 1 January 1970. + + Adjusted to UTC.
+ """ + + +class StringReader(Reader): + def read(self, decoder: BinaryDecoder) -> str: + return decoder.read_utf8() + + def skip(self, decoder: BinaryDecoder) -> None: + decoder.skip_utf8() + + +class UUIDReader(Reader): + def read(self, decoder: BinaryDecoder) -> UUID: + return UUID(bytes=decoder.read(16)) + + def skip(self, decoder: BinaryDecoder) -> None: + decoder.skip(16) + + +@dataclass(frozen=True) +class FixedReader(Reader): + _len: int = dataclassfield() + + def read(self, decoder: BinaryDecoder) -> bytes: + return decoder.read(len(self)) + + def skip(self, decoder: BinaryDecoder) -> None: + decoder.skip(len(self)) + + def __len__(self) -> int: + """Return the length of an instance of the FixedReader class.""" + return self._len + + def __repr__(self) -> str: + """Return the string representation of the FixedReader class.""" + return f"FixedReader({self._len})" + + +class BinaryReader(Reader): + """Read a binary value. + + First reads an integer, to get the length of the binary value, + then reads the binary field itself. + """ + + def read(self, decoder: BinaryDecoder) -> bytes: + return decoder.read_bytes() + + def skip(self, decoder: BinaryDecoder) -> None: + decoder.skip_bytes() + + +@dataclass(frozen=True, init=False) +class DecimalReader(Reader): + """Reads a value as a decimal. + + Decimal bytes are decoded as signed short, int or long depending on the + size of bytes. + """ + + precision: int = dataclassfield() + scale: int = dataclassfield() + _length: int + + def __init__(self, precision: int, scale: int): + object.__setattr__(self, "precision", precision) + object.__setattr__(self, "scale", scale) + object.__setattr__(self, "_length", decimal_required_bytes(precision)) + + def read(self, decoder: BinaryDecoder) -> Decimal: + return bytes_to_decimal(decoder.read(self._length), self.scale) + + def skip(self, decoder: BinaryDecoder) -> None: + decoder.skip_bytes() + + def __repr__(self) -> str: + """Return the string representation of the DecimalReader class.""" + return f"DecimalReader({self.precision}, {self.scale})" + + +@dataclass(frozen=True) +class OptionReader(Reader): + option: Reader = dataclassfield() + + def read(self, decoder: BinaryDecoder) -> Optional[Any]: + # For the Iceberg spec it is required to set the default value to null + # From https://iceberg.apache.org/spec/#avro + # Optional fields must always set the Avro field default value to null. + # + # This means that null has to come first: + # https://avro.apache.org/docs/current/spec.html + # type of the default value must match the first element of the union. + # This is enforced in the schema conversion, which happens prior + # to building the reader tree + if decoder.read_int() > 0: + return self.option.read(decoder) + return None + + def skip(self, decoder: BinaryDecoder) -> None: + if decoder.read_int() > 0: + return self.option.skip(decoder) + + +class StructReader(Reader): + __slots__ = ("field_readers", "create_struct", "struct", "_create_with_keyword", "_field_reader_functions", "_hash") + field_readers: Tuple[Tuple[Optional[int], Reader], ...] + create_struct: Callable[..., StructProtocol] + struct: StructType + field_reader_functions = Tuple[Tuple[Optional[str], int, Optional[Callable[[BinaryDecoder], Any]]], ...] 
+ + def __init__( + self, + field_readers: Tuple[Tuple[Optional[int], Reader], ...], + create_struct: Callable[..., StructProtocol], + struct: StructType, + ) -> None: + self.field_readers = field_readers + self.create_struct = create_struct + self.struct = struct + + try: + # Try initializing the struct, first with the struct keyword argument + created_struct = self.create_struct(struct=self.struct) + self._create_with_keyword = True + except TypeError as e: + if "'struct' is an invalid keyword argument for" in str(e): + created_struct = self.create_struct() + self._create_with_keyword = False + else: + raise ValueError(f"Unable to initialize struct: {self.create_struct}") from e + + if not isinstance(created_struct, StructProtocol): + raise ValueError(f"Incompatible with StructProtocol: {self.create_struct}") + + reading_callbacks: List[Tuple[Optional[int], Callable[[BinaryDecoder], Any]]] = [] + for pos, field in field_readers: + if pos is not None: + reading_callbacks.append((pos, field.read)) + else: + reading_callbacks.append((None, field.skip)) + + self._field_reader_functions = tuple(reading_callbacks) + self._hash = hash(self._field_reader_functions) + + def read(self, decoder: BinaryDecoder) -> StructProtocol: + struct = self.create_struct(struct=self.struct) if self._create_with_keyword else self.create_struct() + for pos, field_reader in self._field_reader_functions: + if pos is not None: + struct[pos] = field_reader(decoder) # later: pass reuse in here + else: + field_reader(decoder) + + return struct + + def skip(self, decoder: BinaryDecoder) -> None: + for _, field in self.field_readers: + field.skip(decoder) + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the StructReader class.""" + return ( + self.field_readers == other.field_readers and self.create_struct == other.create_struct + if isinstance(other, StructReader) + else False + ) + + def __repr__(self) -> str: + """Return the string representation of the StructReader class.""" + return f"StructReader(({','.join(repr(field) for field in self.field_readers)}), {repr(self.create_struct)})" + + def __hash__(self) -> int: + """Return a hashed representation of the StructReader class.""" + return self._hash + + +@dataclass(frozen=False, init=False) +class ListReader(Reader): + __slots__ = ("element", "_is_int_list", "_hash") + element: Reader + + def __init__(self, element: Reader) -> None: + super().__init__() + self.element = element + self._hash = hash(self.element) + self._is_int_list = isinstance(self.element, IntegerReader) + + def read(self, decoder: BinaryDecoder) -> List[Any]: + read_items: List[Any] = [] + block_count = decoder.read_int() + while block_count != 0: + if block_count < 0: + block_count = -block_count + _ = decoder.read_int() + if self._is_int_list: + read_items.extend(decoder.read_ints(block_count)) + else: + for _ in range(block_count): + read_items.append(self.element.read(decoder)) + block_count = decoder.read_int() + return read_items + + def skip(self, decoder: BinaryDecoder) -> None: + _skip_map_array(decoder, lambda: self.element.skip(decoder)) + + def __hash__(self) -> int: + """Return a hashed representation of the ListReader class.""" + return self._hash + + +# Represent an empty dict as a singleton +EMPTY_DICT: dict[Any, Any] = {} + + +@dataclass(frozen=False, init=False) +class MapReader(Reader): + __slots__ = ("key", "value", "_is_int_int", "_is_int_bytes", "_key_reader", "_value_reader", "_hash") + key: Reader + value: Reader + + def __init__(self, 
key: Reader, value: Reader) -> None: + super().__init__() + self.key = key + self.value = value + if isinstance(self.key, IntegerReader): + self._is_int_int = isinstance(self.value, IntegerReader) + self._is_int_bytes = isinstance(self.value, BinaryReader) + else: + self._is_int_int = False + self._is_int_bytes = False + self._key_reader = self.key.read + self._value_reader = self.value.read + self._hash = hash((self.key, self.value)) + + def _read_int_int(self, decoder: BinaryDecoder) -> Mapping[int, int]: + """Read a mapping from int to int from the decoder. + + Read a map of ints to ints from the decoder, since this is such a common + data type, it is optimized to be faster than the generic map reader, by + using a lazy dict. + + The time it takes to create the python dictionary is much larger than + the time it takes to read the data from the decoder as an array, so the + lazy dict defers creating the python dictionary until it is actually + accessed. + + """ + block_count = decoder.read_int() + + # Often times the map is empty, so we can just return an empty dict without + # instancing the LazyDict + if block_count == 0: + return EMPTY_DICT + + contents_array: List[Tuple[int, ...]] = [] + + while block_count != 0: + if block_count < 0: + block_count = -block_count + # We ignore the block size for now + decoder.skip_int() + + # Since the integers are encoding right next to each other + # just read them all at once. + contents_array.append(decoder.read_ints(block_count * 2)) + block_count = decoder.read_int() + + return LazyDict(contents_array) + + def read(self, decoder: BinaryDecoder) -> Mapping[Any, Any]: + read_items: dict[Any, Any] = {} + + if self._is_int_int or self._is_int_bytes: + if self._is_int_int: + return self._read_int_int(decoder) + + block_count = decoder.read_int() + while block_count != 0: + if block_count < 0: + block_count = -block_count + # We ignore the block size for now + _ = decoder.read_int() + decoder.read_int_bytes_dict(block_count, read_items) + block_count = decoder.read_int() + else: + block_count = decoder.read_int() + while block_count != 0: + if block_count < 0: + block_count = -block_count + # We ignore the block size for now + _ = decoder.read_int() + for _ in range(block_count): + key = self._key_reader(decoder) + read_items[key] = self._value_reader(decoder) + block_count = decoder.read_int() + + return read_items + + def skip(self, decoder: BinaryDecoder) -> None: + def skip() -> None: + self.key.skip(decoder) + self.value.skip(decoder) + + _skip_map_array(decoder, skip) + + def __hash__(self) -> int: + """Return a hashed representation of the MapReader class.""" + return self._hash diff --git a/pyiceberg/avro/resolver.py b/pyiceberg/avro/resolver.py new file mode 100644 index 0000000000..8b2daeb7c7 --- /dev/null +++ b/pyiceberg/avro/resolver.py @@ -0,0 +1,397 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=arguments-renamed,unused-argument +from enum import Enum +from typing import ( + Callable, + Dict, + List, + Optional, + Tuple, + Union, +) + +from pyiceberg.avro.decoder import BinaryDecoder +from pyiceberg.avro.reader import ( + BinaryReader, + BooleanReader, + DateReader, + DecimalReader, + DefaultReader, + DoubleReader, + FixedReader, + FloatReader, + IntegerReader, + ListReader, + MapReader, + NoneReader, + OptionReader, + Reader, + StringReader, + StructReader, + TimeReader, + TimestampReader, + TimestamptzReader, + UUIDReader, +) +from pyiceberg.avro.writer import ( + BinaryWriter, + BooleanWriter, + DateWriter, + DecimalWriter, + DoubleWriter, + FixedWriter, + FloatWriter, + IntegerWriter, + ListWriter, + MapWriter, + OptionWriter, + StringWriter, + StructWriter, + TimestamptzWriter, + TimestampWriter, + TimeWriter, + UUIDWriter, + Writer, +) +from pyiceberg.exceptions import ResolveError +from pyiceberg.schema import ( + PartnerAccessor, + PrimitiveWithPartnerVisitor, + Schema, + SchemaVisitorPerPrimitiveType, + promote, + visit, + visit_with_partner, +) +from pyiceberg.typedef import EMPTY_DICT, Record, StructProtocol +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IcebergType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + PrimitiveType, + StringType, + StructType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) + +STRUCT_ROOT = -1 + + +def construct_reader( + file_schema: Union[Schema, IcebergType], read_types: Dict[int, Callable[..., StructProtocol]] = EMPTY_DICT +) -> Reader: + """Construct a reader from a file schema. + + Args: + file_schema (Schema | IcebergType): The schema of the Avro file. + + Raises: + NotImplementedError: If attempting to resolve an unrecognized object type. + """ + return resolve(file_schema, file_schema, read_types) + + +def construct_writer(file_schema: Union[Schema, IcebergType]) -> Writer: + """Construct a writer from a file schema. + + Args: + file_schema (Schema | IcebergType): The schema of the Avro file. + + Raises: + NotImplementedError: If attempting to resolve an unrecognized object type. 
+ """ + return visit(file_schema, ConstructWriter()) + + +class ConstructWriter(SchemaVisitorPerPrimitiveType[Writer]): + """Construct a writer tree from an Iceberg schema.""" + + def schema(self, schema: Schema, struct_result: Writer) -> Writer: + return struct_result + + def struct(self, struct: StructType, field_results: List[Writer]) -> Writer: + return StructWriter(tuple(field_results)) + + def field(self, field: NestedField, field_result: Writer) -> Writer: + return field_result if field.required else OptionWriter(field_result) + + def list(self, list_type: ListType, element_result: Writer) -> Writer: + return ListWriter(element_result) + + def map(self, map_type: MapType, key_result: Writer, value_result: Writer) -> Writer: + return MapWriter(key_result, value_result) + + def visit_fixed(self, fixed_type: FixedType) -> Writer: + return FixedWriter(len(fixed_type)) + + def visit_decimal(self, decimal_type: DecimalType) -> Writer: + return DecimalWriter(decimal_type.precision, decimal_type.scale) + + def visit_boolean(self, boolean_type: BooleanType) -> Writer: + return BooleanWriter() + + def visit_integer(self, integer_type: IntegerType) -> Writer: + return IntegerWriter() + + def visit_long(self, long_type: LongType) -> Writer: + return IntegerWriter() + + def visit_float(self, float_type: FloatType) -> Writer: + return FloatWriter() + + def visit_double(self, double_type: DoubleType) -> Writer: + return DoubleWriter() + + def visit_date(self, date_type: DateType) -> Writer: + return DateWriter() + + def visit_time(self, time_type: TimeType) -> Writer: + return TimeWriter() + + def visit_timestamp(self, timestamp_type: TimestampType) -> Writer: + return TimestampWriter() + + def visit_timestamptz(self, timestamptz_type: TimestamptzType) -> Writer: + return TimestamptzWriter() + + def visit_string(self, string_type: StringType) -> Writer: + return StringWriter() + + def visit_uuid(self, uuid_type: UUIDType) -> Writer: + return UUIDWriter() + + def visit_binary(self, binary_type: BinaryType) -> Writer: + return BinaryWriter() + + +def resolve( + file_schema: Union[Schema, IcebergType], + read_schema: Union[Schema, IcebergType], + read_types: Dict[int, Callable[..., StructProtocol]] = EMPTY_DICT, + read_enums: Dict[int, Callable[..., Enum]] = EMPTY_DICT, +) -> Reader: + """Resolve the file and read schema to produce a reader. + + Args: + file_schema (Schema | IcebergType): The schema of the Avro file. + read_schema (Schema | IcebergType): The requested read schema which is equal, subset or superset of the file schema. + read_types (Dict[int, Callable[..., StructProtocol]]): A dict of types to use for struct data. + read_enums (Dict[int, Callable[..., Enum]]): A dict of fields that have to be converted to an enum. + + Raises: + NotImplementedError: If attempting to resolve an unrecognized object type. 
+ """ + return visit_with_partner( + file_schema, read_schema, SchemaResolver(read_types, read_enums), SchemaPartnerAccessor() + ) # type: ignore + + +class EnumReader(Reader): + """An Enum reader to wrap primitive values into an Enum.""" + + __slots__ = ("enum", "reader") + + enum: Callable[..., Enum] + reader: Reader + + def __init__(self, enum: Callable[..., Enum], reader: Reader) -> None: + self.enum = enum + self.reader = reader + + def read(self, decoder: BinaryDecoder) -> Enum: + return self.enum(self.reader.read(decoder)) + + def skip(self, decoder: BinaryDecoder) -> None: + pass + + +class SchemaResolver(PrimitiveWithPartnerVisitor[IcebergType, Reader]): + __slots__ = ("read_types", "read_enums", "context") + read_types: Dict[int, Callable[..., StructProtocol]] + read_enums: Dict[int, Callable[..., Enum]] + context: List[int] + + def __init__( + self, + read_types: Dict[int, Callable[..., StructProtocol]] = EMPTY_DICT, + read_enums: Dict[int, Callable[..., Enum]] = EMPTY_DICT, + ) -> None: + self.read_types = read_types + self.read_enums = read_enums + self.context = [] + + def schema(self, schema: Schema, expected_schema: Optional[IcebergType], result: Reader) -> Reader: + return result + + def before_field(self, field: NestedField, field_partner: Optional[NestedField]) -> None: + self.context.append(field.field_id) + + def after_field(self, field: NestedField, field_partner: Optional[NestedField]) -> None: + self.context.pop() + + def struct(self, struct: StructType, expected_struct: Optional[IcebergType], field_readers: List[Reader]) -> Reader: + read_struct_id = self.context[STRUCT_ROOT] if len(self.context) > 0 else STRUCT_ROOT + struct_callable = self.read_types.get(read_struct_id, Record) + + if not expected_struct: + return StructReader(tuple(enumerate(field_readers)), struct_callable, struct) + + if not isinstance(expected_struct, StructType): + raise ResolveError(f"File/read schema are not aligned for struct, got {expected_struct}") + + expected_positions: Dict[int, int] = {field.field_id: pos for pos, field in enumerate(expected_struct.fields)} + + # first, add readers for the file fields that must be in order + results: List[Tuple[Optional[int], Reader]] = [ + ( + expected_positions.get(field.field_id), + # Check if we need to convert it to an Enum + result_reader if not (enum_type := self.read_enums.get(field.field_id)) else EnumReader(enum_type, result_reader), + ) + for field, result_reader in zip(struct.fields, field_readers) + ] + + file_fields = {field.field_id: field for field in struct.fields} + for pos, read_field in enumerate(expected_struct.fields): + if read_field.field_id not in file_fields: + if isinstance(read_field, NestedField) and read_field.initial_default is not None: + # The field is not in the file, but there is a default value + # and that one can be required + results.append((pos, DefaultReader(read_field.initial_default))) + elif read_field.required: + raise ResolveError(f"{read_field} is non-optional, and not part of the file schema") + else: + # Just set the new field to None + results.append((pos, NoneReader())) + + return StructReader(tuple(results), struct_callable, expected_struct) + + def field(self, field: NestedField, expected_field: Optional[IcebergType], field_reader: Reader) -> Reader: + return field_reader if field.required else OptionReader(field_reader) + + def list(self, list_type: ListType, expected_list: Optional[IcebergType], element_reader: Reader) -> Reader: + if expected_list and not isinstance(expected_list, ListType): + 
raise ResolveError(f"File/read schema are not aligned for list, got {expected_list}") + + return ListReader(element_reader if list_type.element_required else OptionReader(element_reader)) + + def map(self, map_type: MapType, expected_map: Optional[IcebergType], key_reader: Reader, value_reader: Reader) -> Reader: + if expected_map and not isinstance(expected_map, MapType): + raise ResolveError(f"File/read schema are not aligned for map, got {expected_map}") + + return MapReader(key_reader, value_reader if map_type.value_required else OptionReader(value_reader)) + + def primitive(self, primitive: PrimitiveType, expected_primitive: Optional[IcebergType]) -> Reader: + if expected_primitive is not None: + if not isinstance(expected_primitive, PrimitiveType): + raise ResolveError(f"File/read schema are not aligned for {primitive}, got {expected_primitive}") + + # ensure that the type can be projected to the expected + if primitive != expected_primitive: + promote(primitive, expected_primitive) + + return super().primitive(primitive, expected_primitive) + + def visit_boolean(self, boolean_type: BooleanType, partner: Optional[IcebergType]) -> Reader: + return BooleanReader() + + def visit_integer(self, integer_type: IntegerType, partner: Optional[IcebergType]) -> Reader: + return IntegerReader() + + def visit_long(self, long_type: LongType, partner: Optional[IcebergType]) -> Reader: + return IntegerReader() + + def visit_float(self, float_type: FloatType, partner: Optional[IcebergType]) -> Reader: + return FloatReader() + + def visit_double(self, double_type: DoubleType, partner: Optional[IcebergType]) -> Reader: + return DoubleReader() + + def visit_decimal(self, decimal_type: DecimalType, partner: Optional[IcebergType]) -> Reader: + return DecimalReader(decimal_type.precision, decimal_type.scale) + + def visit_date(self, date_type: DateType, partner: Optional[IcebergType]) -> Reader: + return DateReader() + + def visit_time(self, time_type: TimeType, partner: Optional[IcebergType]) -> Reader: + return TimeReader() + + def visit_timestamp(self, timestamp_type: TimestampType, partner: Optional[IcebergType]) -> Reader: + return TimestampReader() + + def visit_timestamptz(self, timestamptz_type: TimestamptzType, partner: Optional[IcebergType]) -> Reader: + return TimestamptzReader() + + def visit_string(self, string_type: StringType, partner: Optional[IcebergType]) -> Reader: + return StringReader() + + def visit_uuid(self, uuid_type: UUIDType, partner: Optional[IcebergType]) -> Reader: + return UUIDReader() + + def visit_fixed(self, fixed_type: FixedType, partner: Optional[IcebergType]) -> Reader: + return FixedReader(len(fixed_type)) + + def visit_binary(self, binary_type: BinaryType, partner: Optional[IcebergType]) -> Reader: + return BinaryReader() + + +class SchemaPartnerAccessor(PartnerAccessor[IcebergType]): + def schema_partner(self, partner: Optional[IcebergType]) -> Optional[IcebergType]: + if isinstance(partner, Schema): + return partner.as_struct() + + raise ResolveError(f"File/read schema are not aligned for schema, got {partner}") + + def field_partner(self, partner: Optional[IcebergType], field_id: int, field_name: str) -> Optional[IcebergType]: + if isinstance(partner, StructType): + field = partner.field(field_id) + else: + raise ResolveError(f"File/read schema are not aligned for struct, got {partner}") + + return field.field_type if field else None + + def list_element_partner(self, partner_list: Optional[IcebergType]) -> Optional[IcebergType]: + if isinstance(partner_list, 
ListType): + return partner_list.element_type + + raise ResolveError(f"File/read schema are not aligned for list, got {partner_list}") + + def map_key_partner(self, partner_map: Optional[IcebergType]) -> Optional[IcebergType]: + if isinstance(partner_map, MapType): + return partner_map.key_type + + raise ResolveError(f"File/read schema are not aligned for map, got {partner_map}") + + def map_value_partner(self, partner_map: Optional[IcebergType]) -> Optional[IcebergType]: + if isinstance(partner_map, MapType): + return partner_map.value_type + + raise ResolveError(f"File/read schema are not aligned for map, got {partner_map}") diff --git a/pyiceberg/avro/writer.py b/pyiceberg/avro/writer.py new file mode 100644 index 0000000000..ad6a755614 --- /dev/null +++ b/pyiceberg/avro/writer.py @@ -0,0 +1,203 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Classes for building the Writer tree. + +Constructing a writer tree from the schema makes it easy +to decouple the writing implementation from the schema. +""" +from __future__ import annotations + +from abc import abstractmethod +from dataclasses import dataclass +from dataclasses import field as dataclassfield +from typing import ( + Any, + Dict, + List, + Tuple, +) +from uuid import UUID + +from pyiceberg.avro.encoder import BinaryEncoder +from pyiceberg.typedef import Record +from pyiceberg.utils.decimal import decimal_required_bytes, decimal_to_bytes +from pyiceberg.utils.singleton import Singleton + + +class Writer(Singleton): + @abstractmethod + def write(self, encoder: BinaryEncoder, val: Any) -> Any: + ... 
+ + def __repr__(self) -> str: + """Return string representation of this object.""" + return f"{self.__class__.__name__}()" + + +class NoneWriter(Writer): + def write(self, _: BinaryEncoder, __: Any) -> None: + pass + + +class BooleanWriter(Writer): + def write(self, encoder: BinaryEncoder, val: bool) -> None: + encoder.write_boolean(val) + + +class IntegerWriter(Writer): + """Longs and ints are encoded the same way, and there is no long in Python.""" + + def write(self, encoder: BinaryEncoder, val: int) -> None: + encoder.write_int(val) + + +class FloatWriter(Writer): + def write(self, encoder: BinaryEncoder, val: float) -> None: + encoder.write_float(val) + + +class DoubleWriter(Writer): + def write(self, encoder: BinaryEncoder, val: float) -> None: + encoder.write_double(val) + + +class DateWriter(Writer): + def write(self, encoder: BinaryEncoder, val: int) -> None: + encoder.write_int(val) + + +class TimeWriter(Writer): + def write(self, encoder: BinaryEncoder, val: int) -> None: + encoder.write_int(val) + + +class TimestampWriter(Writer): + def write(self, encoder: BinaryEncoder, val: int) -> None: + encoder.write_int(val) + + +class TimestamptzWriter(Writer): + def write(self, encoder: BinaryEncoder, val: int) -> None: + encoder.write_int(val) + + +class StringWriter(Writer): + def write(self, encoder: BinaryEncoder, val: Any) -> None: + encoder.write_utf8(val) + + +class UUIDWriter(Writer): + def write(self, encoder: BinaryEncoder, val: UUID) -> None: + encoder.write(val.bytes) + + +@dataclass(frozen=True) +class FixedWriter(Writer): + _len: int = dataclassfield() + + def write(self, encoder: BinaryEncoder, val: bytes) -> None: + if len(val) != self._len: + raise ValueError(f"Expected {self._len} bytes, got {len(val)}") + encoder.write(val) + + def __len__(self) -> int: + """Return the length of this object.""" + return self._len + + def __repr__(self) -> str: + """Return string representation of this object.""" + return f"FixedWriter({self._len})" + + +class BinaryWriter(Writer): + """Variable byte length writer.""" + + def write(self, encoder: BinaryEncoder, val: Any) -> None: + encoder.write_bytes(val) + + +@dataclass(frozen=True) +class DecimalWriter(Writer): + precision: int = dataclassfield() + scale: int = dataclassfield() + + def write(self, encoder: BinaryEncoder, val: Any) -> None: + return encoder.write(decimal_to_bytes(val, byte_length=decimal_required_bytes(self.precision))) + + def __repr__(self) -> str: + """Return string representation of this object.""" + return f"DecimalWriter({self.precision}, {self.scale})" + + +@dataclass(frozen=True) +class OptionWriter(Writer): + option: Writer = dataclassfield() + + def write(self, encoder: BinaryEncoder, val: Any) -> None: + if val is not None: + encoder.write_int(1) + self.option.write(encoder, val) + else: + encoder.write_int(0) + + +@dataclass(frozen=True) +class StructWriter(Writer): + field_writers: Tuple[Writer, ...] 
= dataclassfield() + + def write(self, encoder: BinaryEncoder, val: Record) -> None: + for writer, value in zip(self.field_writers, val.record_fields()): + writer.write(encoder, value) + + def __eq__(self, other: Any) -> bool: + """Implement the equality operator for this object.""" + return self.field_writers == other.field_writers if isinstance(other, StructWriter) else False + + def __repr__(self) -> str: + """Return string representation of this object.""" + return f"StructWriter({','.join(repr(field) for field in self.field_writers)})" + + def __hash__(self) -> int: + """Return the hash of the writer as hash of this object.""" + return hash(self.field_writers) + + +@dataclass(frozen=True) +class ListWriter(Writer): + element_writer: Writer + + def write(self, encoder: BinaryEncoder, val: List[Any]) -> None: + encoder.write_int(len(val)) + for v in val: + self.element_writer.write(encoder, v) + if len(val) > 0: + encoder.write_int(0) + + +@dataclass(frozen=True) +class MapWriter(Writer): + key_writer: Writer + value_writer: Writer + + def write(self, encoder: BinaryEncoder, val: Dict[Any, Any]) -> None: + encoder.write_int(len(val)) + for k, v in val.items(): + self.key_writer.write(encoder, k) + self.value_writer.write(encoder, v) + if len(val) > 0: + encoder.write_int(0) diff --git a/pyiceberg/catalog/__init__.py b/pyiceberg/catalog/__init__.py new file mode 100644 index 0000000000..2577a97bc1 --- /dev/null +++ b/pyiceberg/catalog/__init__.py @@ -0,0 +1,606 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
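To make the writer tree above concrete, here is a minimal usage sketch (not part of the patch) for a struct with a required long and an optional string. It assumes `BinaryEncoder` accepts any binary output stream and that `Record` takes field values positionally; both are defined elsewhere in this patch, so treat the exact signatures as assumptions.

```python
# A minimal sketch, not part of the patch. Assumes BinaryEncoder wraps a
# binary output stream and Record accepts field values positionally.
import io

from pyiceberg.avro.encoder import BinaryEncoder
from pyiceberg.avro.writer import IntegerWriter, OptionWriter, StringWriter, StructWriter
from pyiceberg.typedef import Record

# struct<id: long, name: optional string>
writer = StructWriter((IntegerWriter(), OptionWriter(StringWriter())))

buffer = io.BytesIO()
writer.write(BinaryEncoder(buffer), Record(1, "foo"))
# IntegerWriter zig-zag encodes the long; OptionWriter writes the union
# branch index (1 = present, 0 = null) before delegating to StringWriter.
print(buffer.getvalue().hex())
```

In practice the tree is derived from an Iceberg schema rather than assembled by hand, which is exactly the decoupling the module docstring above describes.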
+
+from __future__ import annotations
+
+import logging
+import uuid
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from enum import Enum
+from typing import (
+ Callable,
+ Dict,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
+
+from pyiceberg.exceptions import NoSuchNamespaceError, NoSuchTableError, NotInstalledError
+from pyiceberg.io import FileIO, load_file_io
+from pyiceberg.manifest import ManifestFile
+from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionSpec
+from pyiceberg.schema import Schema
+from pyiceberg.serializers import ToOutputFile
+from pyiceberg.table import (
+ CommitTableRequest,
+ CommitTableResponse,
+ Table,
+ TableMetadata,
+)
+from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder
+from pyiceberg.typedef import (
+ EMPTY_DICT,
+ Identifier,
+ Properties,
+ RecursiveDict,
+)
+from pyiceberg.utils.config import Config, merge_config
+
+logger = logging.getLogger(__name__)
+
+_ENV_CONFIG = Config()
+
+TOKEN = "token"
+TYPE = "type"
+ICEBERG = "iceberg"
+TABLE_TYPE = "table_type"
+WAREHOUSE_LOCATION = "warehouse"
+METADATA_LOCATION = "metadata_location"
+PREVIOUS_METADATA_LOCATION = "previous_metadata_location"
+MANIFEST = "manifest"
+MANIFEST_LIST = "manifest list"
+PREVIOUS_METADATA = "previous metadata"
+METADATA = "metadata"
+URI = "uri"
+LOCATION = "location"
+EXTERNAL_TABLE = "EXTERNAL_TABLE"
+
+
+class CatalogType(Enum):
+ REST = "rest"
+ HIVE = "hive"
+ GLUE = "glue"
+ DYNAMODB = "dynamodb"
+ SQL = "sql"
+
+
+def load_rest(name: str, conf: Properties) -> Catalog:
+ from pyiceberg.catalog.rest import RestCatalog
+
+ return RestCatalog(name, **conf)
+
+
+def load_hive(name: str, conf: Properties) -> Catalog:
+ try:
+ from pyiceberg.catalog.hive import HiveCatalog
+
+ return HiveCatalog(name, **conf)
+ except ImportError as exc:
+ raise NotInstalledError("Apache Hive support not installed: pip install 'pyiceberg[hive]'") from exc
+
+
+def load_glue(name: str, conf: Properties) -> Catalog:
+ try:
+ from pyiceberg.catalog.glue import GlueCatalog
+
+ return GlueCatalog(name, **conf)
+ except ImportError as exc:
+ raise NotInstalledError("AWS Glue support not installed: pip install 'pyiceberg[glue]'") from exc
+
+
+def load_dynamodb(name: str, conf: Properties) -> Catalog:
+ try:
+ from pyiceberg.catalog.dynamodb import DynamoDbCatalog
+
+ return DynamoDbCatalog(name, **conf)
+ except ImportError as exc:
+ raise NotInstalledError("AWS DynamoDB support not installed: pip install 'pyiceberg[dynamodb]'") from exc
+
+
+def load_sql(name: str, conf: Properties) -> Catalog:
+ try:
+ from pyiceberg.catalog.sql import SqlCatalog
+
+ return SqlCatalog(name, **conf)
+ except ImportError as exc:
+ raise NotInstalledError("SQLAlchemy support not installed: pip install 'pyiceberg[sql-postgres]'") from exc
+
+
+AVAILABLE_CATALOGS: dict[CatalogType, Callable[[str, Properties], Catalog]] = {
+ CatalogType.REST: load_rest,
+ CatalogType.HIVE: load_hive,
+ CatalogType.GLUE: load_glue,
+ CatalogType.DYNAMODB: load_dynamodb,
+ CatalogType.SQL: load_sql,
+}
+
+
+def infer_catalog_type(name: str, catalog_properties: RecursiveDict) -> Optional[CatalogType]:
+ """Try to infer the type based on the dict.
+
+ Args:
+ name: Name of the catalog.
+ catalog_properties: Catalog properties.
+
+ Returns:
+ The inferred type based on the provided properties.
+
+ Raises:
+ ValueError: Raises a ValueError in case properties are missing, or the wrong type.
+ """ + if uri := catalog_properties.get("uri"): + if isinstance(uri, str): + if uri.startswith("http"): + return CatalogType.REST + elif uri.startswith("thrift"): + return CatalogType.HIVE + elif uri.startswith("postgresql"): + return CatalogType.SQL + else: + raise ValueError(f"Could not infer the catalog type from the uri: {uri}") + else: + raise ValueError(f"Expects the URI to be a string, got: {type(uri)}") + raise ValueError( + f"URI missing, please provide using --uri, the config or environment variable PYICEBERG_CATALOG__{name.upper()}__URI" + ) + + +def load_catalog(name: Optional[str] = None, **properties: Optional[str]) -> Catalog: + """Load the catalog based on the properties. + + Will look up the properties from the config, based on the name. + + Args: + name: The name of the catalog. + properties: The properties that are used next to the configuration. + + Returns: + An initialized Catalog. + + Raises: + ValueError: Raises a ValueError in case properties are missing or malformed, + or if it could not determine the catalog based on the properties. + """ + if name is None: + name = _ENV_CONFIG.get_default_catalog_name() + + env = _ENV_CONFIG.get_catalog_config(name) + conf: RecursiveDict = merge_config(env or {}, cast(RecursiveDict, properties)) + + catalog_type: Optional[CatalogType] + provided_catalog_type = conf.get(TYPE) + + catalog_type = None + if provided_catalog_type and isinstance(provided_catalog_type, str): + catalog_type = CatalogType[provided_catalog_type.upper()] + elif not provided_catalog_type: + catalog_type = infer_catalog_type(name, conf) + + if catalog_type: + return AVAILABLE_CATALOGS[catalog_type](name, cast(Dict[str, str], conf)) + + raise ValueError(f"Could not initialize catalog with the following properties: {properties}") + + +def delete_files(io: FileIO, files_to_delete: Set[str], file_type: str) -> None: + """Delete files. + + Log warnings if failing to delete any file. + + Args: + io: The FileIO used to delete the object. + files_to_delete: A set of file paths to be deleted. + file_type: The type of the file. + """ + for file in files_to_delete: + try: + io.delete(file) + except OSError as exc: + logger.warning(msg=f"Failed to delete {file_type} file {file}", exc_info=exc) + + +def delete_data_files(io: FileIO, manifests_to_delete: List[ManifestFile]) -> None: + """Delete data files linked to given manifests. + + Log warnings if failing to delete any file. + + Args: + io: The FileIO used to delete the object. + manifests_to_delete: A list of manifest contains paths of data files to be deleted. + """ + deleted_files: dict[str, bool] = {} + for manifest_file in manifests_to_delete: + for entry in manifest_file.fetch_manifest_entry(io, discard_deleted=False): + path = entry.data_file.file_path + if not deleted_files.get(path, False): + try: + io.delete(path) + except OSError as exc: + logger.warning(msg=f"Failed to delete data file {path}", exc_info=exc) + deleted_files[path] = True + + +@dataclass +class PropertiesUpdateSummary: + removed: List[str] + updated: List[str] + missing: List[str] + + +class Catalog(ABC): + """Base Catalog for table operations like - create, drop, load, list and others. + + The catalog table APIs accept a table identifier, which is fully classified table name. The identifier can be a string or + tuple of strings. If the identifier is a string, it is split into a tuple on '.'. If it is a tuple, it is used as-is. 
+
+ The catalog namespace APIs follow a similar convention wherein they also accept a namespace identifier that can be a string
+ or tuple of strings.
+
+ Attributes:
+ name (str): Name of the catalog.
+ properties (Properties): Catalog properties.
+ """
+
+ name: str
+ properties: Properties
+
+ def __init__(self, name: str, **properties: str):
+ self.name = name
+ self.properties = properties
+
+ def _load_file_io(self, properties: Properties = EMPTY_DICT, location: Optional[str] = None) -> FileIO:
+ return load_file_io({**self.properties, **properties}, location)
+
+ @abstractmethod
+ def create_table(
+ self,
+ identifier: Union[str, Identifier],
+ schema: Schema,
+ location: Optional[str] = None,
+ partition_spec: PartitionSpec = UNPARTITIONED_PARTITION_SPEC,
+ sort_order: SortOrder = UNSORTED_SORT_ORDER,
+ properties: Properties = EMPTY_DICT,
+ ) -> Table:
+ """Create a table.
+
+ Args:
+ identifier (str | Identifier): Table identifier.
+ schema (Schema): Table's schema.
+ location (str | None): Location for the table. Optional Argument.
+ partition_spec (PartitionSpec): PartitionSpec for the table.
+ sort_order (SortOrder): SortOrder for the table.
+ properties (Properties): Table properties that can be a string-based dictionary.
+
+ Returns:
+ Table: the created table instance.
+
+ Raises:
+ TableAlreadyExistsError: If a table with the name already exists.
+ """
+
+ @abstractmethod
+ def load_table(self, identifier: Union[str, Identifier]) -> Table:
+ """Load the table's metadata and return the table instance.
+
+ You can also use this method to check for table existence using 'try catalog.table() except NoSuchTableError'.
+ Note: This method doesn't scan data stored in the table.
+
+ Args:
+ identifier (str | Identifier): Table identifier.
+
+ Returns:
+ Table: the table instance with its metadata.
+
+ Raises:
+ NoSuchTableError: If a table with the name does not exist.
+ """
+
+ @abstractmethod
+ def register_table(self, identifier: Union[str, Identifier], metadata_location: str) -> Table:
+ """Register a new table using existing metadata.
+
+ Args:
+ identifier (Union[str, Identifier]): Table identifier for the table.
+ metadata_location (str): The location of the metadata.
+
+ Returns:
+ Table: The newly registered table.
+
+ Raises:
+ TableAlreadyExistsError: If the table already exists.
+ """
+
+ @abstractmethod
+ def drop_table(self, identifier: Union[str, Identifier]) -> None:
+ """Drop a table.
+
+ Args:
+ identifier (str | Identifier): Table identifier.
+
+ Raises:
+ NoSuchTableError: If a table with the name does not exist.
+ """
+
+ @abstractmethod
+ def rename_table(self, from_identifier: Union[str, Identifier], to_identifier: Union[str, Identifier]) -> Table:
+ """Rename a fully qualified table name.
+
+ Args:
+ from_identifier (str | Identifier): Existing table identifier.
+ to_identifier (str | Identifier): New table identifier.
+
+ Returns:
+ Table: the updated table instance with its metadata.
+
+ Raises:
+ NoSuchTableError: If a table with the name does not exist.
+ """
+
+ @abstractmethod
+ def _commit_table(self, table_request: CommitTableRequest) -> CommitTableResponse:
+ """Update one or more tables.
+
+ Args:
+ table_request (CommitTableRequest): The table requests to be carried out.
+
+ Returns:
+ CommitTableResponse: The updated metadata.
+
+ Raises:
+ NoSuchTableError: If a table with the given identifier does not exist.
+ """ + + @abstractmethod + def create_namespace(self, namespace: Union[str, Identifier], properties: Properties = EMPTY_DICT) -> None: + """Create a namespace in the catalog. + + Args: + namespace (str | Identifier): Namespace identifier. + properties (Properties): A string dictionary of properties for the given namespace. + + Raises: + NamespaceAlreadyExistsError: If a namespace with the given name already exists. + """ + + @abstractmethod + def drop_namespace(self, namespace: Union[str, Identifier]) -> None: + """Drop a namespace. + + Args: + namespace (str | Identifier): Namespace identifier. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist. + NamespaceNotEmptyError: If the namespace is not empty. + """ + + @abstractmethod + def list_tables(self, namespace: Union[str, Identifier]) -> List[Identifier]: + """List tables under the given namespace in the catalog. + + If namespace not provided, will list all tables in the catalog. + + Args: + namespace (str | Identifier): Namespace identifier to search. + + Returns: + List[Identifier]: list of table identifiers. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist. + """ + + @abstractmethod + def list_namespaces(self, namespace: Union[str, Identifier] = ()) -> List[Identifier]: + """List namespaces from the given namespace. If not given, list top-level namespaces from the catalog. + + Args: + namespace (str | Identifier): Namespace identifier to search. + + Returns: + List[Identifier]: a List of namespace identifiers. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist. + """ + + @abstractmethod + def load_namespace_properties(self, namespace: Union[str, Identifier]) -> Properties: + """Get properties for a namespace. + + Args: + namespace (str | Identifier): Namespace identifier. + + Returns: + Properties: Properties for the given namespace. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist. + """ + + @abstractmethod + def update_namespace_properties( + self, namespace: Union[str, Identifier], removals: Optional[Set[str]] = None, updates: Properties = EMPTY_DICT + ) -> PropertiesUpdateSummary: + """Remove provided property keys and updates properties for a namespace. + + Args: + namespace (str | Identifier): Namespace identifier. + removals (Set[str]): Set of property keys that need to be removed. Optional Argument. + updates (Properties): Properties to be updated for the given namespace. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist. + ValueError: If removals and updates have overlapping keys. + """ + + @staticmethod + def identifier_to_tuple(identifier: Union[str, Identifier]) -> Identifier: + """Parse an identifier to a tuple. + + If the identifier is a string, it is split into a tuple on '.'. If it is a tuple, it is used as-is. + + Args: + identifier (str | Identifier: an identifier, either a string or tuple of strings. + + Returns: + Identifier: a tuple of strings. + """ + return identifier if isinstance(identifier, tuple) else tuple(str.split(identifier, ".")) + + @staticmethod + def table_name_from(identifier: Union[str, Identifier]) -> str: + """Extract table name from a table identifier. + + Args: + identifier (str | Identifier: a table identifier. + + Returns: + str: Table name. 
+ """ + return Catalog.identifier_to_tuple(identifier)[-1] + + @staticmethod + def namespace_from(identifier: Union[str, Identifier]) -> Identifier: + """Extract table namespace from a table identifier. + + Args: + identifier (Union[str, Identifier]): a table identifier. + + Returns: + Identifier: Namespace identifier. + """ + return Catalog.identifier_to_tuple(identifier)[:-1] + + @staticmethod + def _check_for_overlap(removals: Optional[Set[str]], updates: Properties) -> None: + if updates and removals: + overlap = set(removals) & set(updates.keys()) + if overlap: + raise ValueError(f"Updates and deletes have an overlap: {overlap}") + + def _resolve_table_location(self, location: Optional[str], database_name: str, table_name: str) -> str: + if not location: + return self._get_default_warehouse_location(database_name, table_name) + return location + + def _get_default_warehouse_location(self, database_name: str, table_name: str) -> str: + database_properties = self.load_namespace_properties(database_name) + if database_location := database_properties.get(LOCATION): + database_location = database_location.rstrip("/") + return f"{database_location}/{table_name}" + + if warehouse_path := self.properties.get(WAREHOUSE_LOCATION): + warehouse_path = warehouse_path.rstrip("/") + return f"{warehouse_path}/{database_name}.db/{table_name}" + + raise ValueError("No default path is set, please specify a location when creating a table") + + @staticmethod + def identifier_to_database( + identifier: Union[str, Identifier], err: Union[Type[ValueError], Type[NoSuchNamespaceError]] = ValueError + ) -> str: + tuple_identifier = Catalog.identifier_to_tuple(identifier) + if len(tuple_identifier) != 1: + raise err(f"Invalid database, hierarchical namespaces are not supported: {identifier}") + + return tuple_identifier[0] + + @staticmethod + def identifier_to_database_and_table( + identifier: Union[str, Identifier], + err: Union[Type[ValueError], Type[NoSuchTableError], Type[NoSuchNamespaceError]] = ValueError, + ) -> Tuple[str, str]: + tuple_identifier = Catalog.identifier_to_tuple(identifier) + if len(tuple_identifier) != 2: + raise err(f"Invalid path, hierarchical namespaces are not supported: {identifier}") + + return tuple_identifier[0], tuple_identifier[1] + + def purge_table(self, identifier: Union[str, Identifier]) -> None: + """Drop a table and purge all data and metadata files. + + Note: This method only logs warning rather than raise exception when encountering file deletion failure. + + Args: + identifier (str | Identifier): Table identifier. + + Raises: + NoSuchTableError: If a table with the name does not exist, or the identifier is invalid. 
+ """ + table = self.load_table(identifier) + self.drop_table(identifier) + io = load_file_io(self.properties, table.metadata_location) + metadata = table.metadata + manifest_lists_to_delete = set() + manifests_to_delete: List[ManifestFile] = [] + for snapshot in metadata.snapshots: + manifests_to_delete += snapshot.manifests(io) + if snapshot.manifest_list is not None: + manifest_lists_to_delete.add(snapshot.manifest_list) + + manifest_paths_to_delete = {manifest.manifest_path for manifest in manifests_to_delete} + prev_metadata_files = {log.metadata_file for log in metadata.metadata_log} + + delete_data_files(io, manifests_to_delete) + delete_files(io, manifest_paths_to_delete, MANIFEST) + delete_files(io, manifest_lists_to_delete, MANIFEST_LIST) + delete_files(io, prev_metadata_files, PREVIOUS_METADATA) + delete_files(io, {table.metadata_location}, METADATA) + + @staticmethod + def _write_metadata(metadata: TableMetadata, io: FileIO, metadata_path: str) -> None: + ToOutputFile.table_metadata(metadata, io.new_output(metadata_path)) + + @staticmethod + def _get_metadata_location(location: str) -> str: + return f"{location}/metadata/00000-{uuid.uuid4()}.metadata.json" + + def _get_updated_props_and_update_summary( + self, current_properties: Properties, removals: Optional[Set[str]], updates: Properties + ) -> Tuple[PropertiesUpdateSummary, Properties]: + self._check_for_overlap(updates=updates, removals=removals) + updated_properties = dict(current_properties) + + removed: Set[str] = set() + updated: Set[str] = set() + + if removals: + for key in removals: + if key in updated_properties: + updated_properties.pop(key) + removed.add(key) + if updates: + for key, value in updates.items(): + updated_properties[key] = value + updated.add(key) + + expected_to_change = (removals or set()).difference(removed) + properties_update_summary = PropertiesUpdateSummary( + removed=list(removed or []), updated=list(updated or []), missing=list(expected_to_change) + ) + + return properties_update_summary, updated_properties + + def __repr__(self) -> str: + """Return the string representation of the Catalog class.""" + return f"{self.name} ({self.__class__})" diff --git a/pyiceberg/catalog/dynamodb.py b/pyiceberg/catalog/dynamodb.py new file mode 100644 index 0000000000..848ec03126 --- /dev/null +++ b/pyiceberg/catalog/dynamodb.py @@ -0,0 +1,796 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+import uuid +from time import time +from typing import ( + Any, + Dict, + List, + Optional, + Set, + Union, +) + +import boto3 + +from pyiceberg.catalog import ( + ICEBERG, + METADATA_LOCATION, + PREVIOUS_METADATA_LOCATION, + TABLE_TYPE, + Catalog, + Identifier, + Properties, + PropertiesUpdateSummary, +) +from pyiceberg.exceptions import ( + ConditionalCheckFailedException, + GenericDynamoDbError, + NamespaceAlreadyExistsError, + NamespaceNotEmptyError, + NoSuchIcebergTableError, + NoSuchNamespaceError, + NoSuchPropertyException, + NoSuchTableError, + TableAlreadyExistsError, +) +from pyiceberg.io import load_file_io +from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.serializers import FromInputFile +from pyiceberg.table import CommitTableRequest, CommitTableResponse, Table +from pyiceberg.table.metadata import new_table_metadata +from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder +from pyiceberg.typedef import EMPTY_DICT + +DYNAMODB_CLIENT = "dynamodb" + +DYNAMODB_COL_IDENTIFIER = "identifier" +DYNAMODB_COL_NAMESPACE = "namespace" +DYNAMODB_COL_VERSION = "v" +DYNAMODB_COL_UPDATED_AT = "updated_at" +DYNAMODB_COL_CREATED_AT = "created_at" +DYNAMODB_NAMESPACE = "NAMESPACE" +DYNAMODB_NAMESPACE_GSI = "namespace-identifier" +DYNAMODB_PAY_PER_REQUEST = "PAY_PER_REQUEST" + +DYNAMODB_TABLE_NAME = "table-name" +DYNAMODB_TABLE_NAME_DEFAULT = "iceberg" + +PROPERTY_KEY_PREFIX = "p." + +ACTIVE = "ACTIVE" +ITEM = "Item" + + +class DynamoDbCatalog(Catalog): + def __init__(self, name: str, **properties: str): + super().__init__(name, **properties) + self.dynamodb = boto3.client(DYNAMODB_CLIENT) + self.dynamodb_table_name = self.properties.get(DYNAMODB_TABLE_NAME, DYNAMODB_TABLE_NAME_DEFAULT) + self._ensure_catalog_table_exists_or_create() + + def _ensure_catalog_table_exists_or_create(self) -> None: + if self._dynamodb_table_exists(): + return None + + try: + self.dynamodb.create_table( + TableName=self.dynamodb_table_name, + AttributeDefinitions=CREATE_CATALOG_ATTRIBUTE_DEFINITIONS, + KeySchema=CREATE_CATALOG_KEY_SCHEMA, + GlobalSecondaryIndexes=CREATE_CATALOG_GLOBAL_SECONDARY_INDEXES, + BillingMode=DYNAMODB_PAY_PER_REQUEST, + ) + except ( + self.dynamodb.exceptions.ResourceInUseException, + self.dynamodb.exceptions.LimitExceededException, + self.dynamodb.exceptions.InternalServerError, + ) as e: + raise GenericDynamoDbError(e.message) from e + + def _dynamodb_table_exists(self) -> bool: + try: + response = self.dynamodb.describe_table(TableName=self.dynamodb_table_name) + except self.dynamodb.exceptions.ResourceNotFoundException: + return False + except self.dynamodb.exceptions.InternalServerError as e: + raise GenericDynamoDbError(e.message) from e + + if response["Table"]["TableStatus"] != ACTIVE: + raise GenericDynamoDbError(f"DynamoDB table for catalog {self.dynamodb_table_name} is not {ACTIVE}") + else: + return True + + def create_table( + self, + identifier: Union[str, Identifier], + schema: Schema, + location: Optional[str] = None, + partition_spec: PartitionSpec = UNPARTITIONED_PARTITION_SPEC, + sort_order: SortOrder = UNSORTED_SORT_ORDER, + properties: Properties = EMPTY_DICT, + ) -> Table: + """ + Create an Iceberg table. + + Args: + identifier: Table identifier. + schema: Table's schema. + location: Location for the table. Optional Argument. + partition_spec: PartitionSpec for the table. + sort_order: SortOrder for the table. 
+ properties: Table properties that can be a string-based dictionary.
+
+ Returns:
+ Table: the created table instance.
+
+ Raises:
+ AlreadyExistsError: If a table with the name already exists.
+ ValueError: If the identifier is invalid, or no path is given to store metadata.
+
+ """
+ database_name, table_name = self.identifier_to_database_and_table(identifier)
+
+ location = self._resolve_table_location(location, database_name, table_name)
+ metadata_location = self._get_metadata_location(location=location)
+ metadata = new_table_metadata(
+ location=location, schema=schema, partition_spec=partition_spec, sort_order=sort_order, properties=properties
+ )
+ io = load_file_io(properties=self.properties, location=metadata_location)
+ self._write_metadata(metadata, io, metadata_location)
+
+ self._ensure_namespace_exists(database_name=database_name)
+
+ try:
+ self._put_dynamo_item(
+ item=_get_create_table_item(
+ database_name=database_name, table_name=table_name, properties=properties, metadata_location=metadata_location
+ ),
+ condition_expression=f"attribute_not_exists({DYNAMODB_COL_IDENTIFIER})",
+ )
+ except ConditionalCheckFailedException as e:
+ raise TableAlreadyExistsError(f"Table {database_name}.{table_name} already exists") from e
+
+ return self.load_table(identifier=identifier)
+
+ def register_table(self, identifier: Union[str, Identifier], metadata_location: str) -> Table:
+ """Register a new table using existing metadata.
+
+ Args:
+ identifier (Union[str, Identifier]): Table identifier for the table.
+ metadata_location (str): The location of the metadata.
+
+ Returns:
+ Table: The newly registered table.
+
+ Raises:
+ TableAlreadyExistsError: If the table already exists.
+ """
+ raise NotImplementedError
+
+ def _commit_table(self, table_request: CommitTableRequest) -> CommitTableResponse:
+ """Update the table.
+
+ Args:
+ table_request (CommitTableRequest): The table requests to be carried out.
+
+ Returns:
+ CommitTableResponse: The updated metadata.
+
+ Raises:
+ NoSuchTableError: If a table with the given identifier does not exist.
+ """
+ raise NotImplementedError
+
+ def load_table(self, identifier: Union[str, Identifier]) -> Table:
+ """
+ Load the table's metadata and return the table instance.
+
+ You can also use this method to check for table existence using 'try catalog.table() except NoSuchTableError'.
+ Note: This method doesn't scan data stored in the table.
+
+ Args:
+ identifier: Table identifier.
+
+ Returns:
+ Table: the table instance with its metadata.
+
+ Raises:
+ NoSuchTableError: If a table with the name does not exist, or the identifier is invalid.
+ """
+ database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError)
+ dynamo_table_item = self._get_iceberg_table_item(database_name=database_name, table_name=table_name)
+ return self._convert_dynamo_table_item_to_iceberg_table(dynamo_table_item=dynamo_table_item)
+
+ def drop_table(self, identifier: Union[str, Identifier]) -> None:
+ """Drop a table.
+
+ Args:
+ identifier: Table identifier.
+
+ Raises:
+ NoSuchTableError: If a table with the name does not exist, or the identifier is invalid.
+ """ + database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError) + + try: + self._delete_dynamo_item( + namespace=database_name, + identifier=f"{database_name}.{table_name}", + condition_expression=f"attribute_exists({DYNAMODB_COL_IDENTIFIER})", + ) + except ConditionalCheckFailedException as e: + raise NoSuchTableError(f"Table does not exist: {database_name}.{table_name}") from e + + def rename_table(self, from_identifier: Union[str, Identifier], to_identifier: Union[str, Identifier]) -> Table: + """Rename a fully classified table name. + + This method can only rename Iceberg tables in AWS Glue. + + Args: + from_identifier: Existing table identifier. + to_identifier: New table identifier. + + Returns: + Table: the updated table instance with its metadata. + + Raises: + ValueError: When from table identifier is invalid. + NoSuchTableError: When a table with the name does not exist. + NoSuchIcebergTableError: When from table is not a valid iceberg table. + NoSuchPropertyException: When from table miss some required properties. + NoSuchNamespaceError: When the destination namespace doesn't exist. + """ + from_database_name, from_table_name = self.identifier_to_database_and_table(from_identifier, NoSuchTableError) + to_database_name, to_table_name = self.identifier_to_database_and_table(to_identifier) + + from_table_item = self._get_iceberg_table_item(database_name=from_database_name, table_name=from_table_name) + + try: + # Verify that from_identifier is a valid iceberg table + self._convert_dynamo_table_item_to_iceberg_table(dynamo_table_item=from_table_item) + except NoSuchPropertyException as e: + raise NoSuchPropertyException( + f"Failed to rename table {from_database_name}.{from_table_name} since it is missing required properties" + ) from e + except NoSuchIcebergTableError as e: + raise NoSuchIcebergTableError( + f"Failed to rename table {from_database_name}.{from_table_name} since it is not a valid iceberg table" + ) from e + + self._ensure_namespace_exists(database_name=from_database_name) + self._ensure_namespace_exists(database_name=to_database_name) + + try: + self._put_dynamo_item( + item=_get_rename_table_item( + from_dynamo_table_item=from_table_item, to_database_name=to_database_name, to_table_name=to_table_name + ), + condition_expression=f"attribute_not_exists({DYNAMODB_COL_IDENTIFIER})", + ) + except ConditionalCheckFailedException as e: + raise TableAlreadyExistsError(f"Table {to_database_name}.{to_table_name} already exists") from e + + try: + self.drop_table(from_identifier) + except (NoSuchTableError, GenericDynamoDbError) as e: + log_message = f"Failed to drop old table {from_database_name}.{from_table_name}. " + + try: + self.drop_table(to_identifier) + log_message += f"Rolled back table creation for {to_database_name}.{to_table_name}." + except (NoSuchTableError, GenericDynamoDbError): + log_message += ( + f"Failed to roll back table creation for {to_database_name}.{to_table_name}. " f"Please clean up manually" + ) + + raise ValueError(log_message) from e + + return self.load_table(to_identifier) + + def create_namespace(self, namespace: Union[str, Identifier], properties: Properties = EMPTY_DICT) -> None: + """Create a namespace in the catalog. + + Args: + namespace: Namespace identifier. + properties: A string dictionary of properties for the given namespace. + + Raises: + ValueError: If the identifier is invalid. + AlreadyExistsError: If a namespace with the given name already exists. 
+ """ + database_name = self.identifier_to_database(namespace) + + try: + self._put_dynamo_item( + item=_get_create_database_item(database_name=database_name, properties=properties), + condition_expression=f"attribute_not_exists({DYNAMODB_COL_NAMESPACE})", + ) + except ConditionalCheckFailedException as e: + raise NamespaceAlreadyExistsError(f"Database {database_name} already exists") from e + + def drop_namespace(self, namespace: Union[str, Identifier]) -> None: + """Drop a namespace. + + A Glue namespace can only be dropped if it is empty. + + Args: + namespace: Namespace identifier. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist, or the identifier is invalid. + NamespaceNotEmptyError: If the namespace is not empty. + """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + table_identifiers = self.list_tables(namespace=database_name) + + if len(table_identifiers) > 0: + raise NamespaceNotEmptyError(f"Database {database_name} is not empty") + + try: + self._delete_dynamo_item( + namespace=database_name, + identifier=DYNAMODB_NAMESPACE, + condition_expression=f"attribute_exists({DYNAMODB_COL_IDENTIFIER})", + ) + except ConditionalCheckFailedException as e: + raise NoSuchNamespaceError(f"Database does not exist: {database_name}") from e + + def list_tables(self, namespace: Union[str, Identifier]) -> List[Identifier]: + """List tables under the given namespace in the catalog (including non-Iceberg tables). + + Args: + namespace (str | Identifier): Namespace identifier to search. + + Returns: + List[Identifier]: list of table identifiers. + """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + + paginator = self.dynamodb.get_paginator("query") + + try: + page_iterator = paginator.paginate( + TableName=self.dynamodb_table_name, + IndexName=DYNAMODB_NAMESPACE_GSI, + KeyConditionExpression=f"{DYNAMODB_COL_NAMESPACE} = :namespace ", + ExpressionAttributeValues={ + ":namespace": { + "S": database_name, + } + }, + ) + except ( + self.dynamodb.exceptions.ProvisionedThroughputExceededException, + self.dynamodb.exceptions.RequestLimitExceeded, + self.dynamodb.exceptions.InternalServerError, + self.dynamodb.exceptions.ResourceNotFoundException, + ) as e: + raise GenericDynamoDbError(e.message) from e + + table_identifiers = [] + for page in page_iterator: + for item in page["Items"]: + _dict = _convert_dynamo_item_to_regular_dict(item) + identifier_col = _dict[DYNAMODB_COL_IDENTIFIER] + if identifier_col == DYNAMODB_NAMESPACE: + continue + + table_identifiers.append(self.identifier_to_tuple(identifier_col)) + + return table_identifiers + + def list_namespaces(self, namespace: Union[str, Identifier] = ()) -> List[Identifier]: + """List top-level namespaces from the catalog. + + We do not support hierarchical namespace. + + Returns: + List[Identifier]: a List of namespace identifiers. + """ + # Hierarchical namespace is not supported. 
Return an empty list + if namespace: + return [] + + paginator = self.dynamodb.get_paginator("query") + + try: + page_iterator = paginator.paginate( + TableName=self.dynamodb_table_name, + ConsistentRead=True, + KeyConditionExpression=f"{DYNAMODB_COL_IDENTIFIER} = :identifier", + ExpressionAttributeValues={ + ":identifier": { + "S": DYNAMODB_NAMESPACE, + } + }, + ) + except ( + self.dynamodb.exceptions.ProvisionedThroughputExceededException, + self.dynamodb.exceptions.RequestLimitExceeded, + self.dynamodb.exceptions.InternalServerError, + self.dynamodb.exceptions.ResourceNotFoundException, + ) as e: + raise GenericDynamoDbError(e.message) from e + + database_identifiers = [] + for page in page_iterator: + for item in page["Items"]: + _dict = _convert_dynamo_item_to_regular_dict(item) + namespace_col = _dict[DYNAMODB_COL_NAMESPACE] + database_identifiers.append(self.identifier_to_tuple(namespace_col)) + + return database_identifiers + + def load_namespace_properties(self, namespace: Union[str, Identifier]) -> Properties: + """ + Get properties for a namespace. + + Args: + namespace: Namespace identifier. + + Returns: + Properties: Properties for the given namespace. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist, or identifier is invalid. + """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + namespace_item = self._get_iceberg_namespace_item(database_name=database_name) + namespace_dict = _convert_dynamo_item_to_regular_dict(namespace_item) + return _get_namespace_properties(namespace_dict=namespace_dict) + + def update_namespace_properties( + self, namespace: Union[str, Identifier], removals: Optional[Set[str]] = None, updates: Properties = EMPTY_DICT + ) -> PropertiesUpdateSummary: + """ + Remove or update provided property keys for a namespace. + + Args: + namespace: Namespace identifier + removals: Set of property keys that need to be removed. Optional Argument. + updates: Properties to be updated for the given namespace. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist, or identifier is invalid. + ValueError: If removals and updates have overlapping keys. 
+ """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + namespace_item = self._get_iceberg_namespace_item(database_name=database_name) + namespace_dict = _convert_dynamo_item_to_regular_dict(namespace_item) + current_properties = _get_namespace_properties(namespace_dict=namespace_dict) + + properties_update_summary, updated_properties = self._get_updated_props_and_update_summary( + current_properties=current_properties, removals=removals, updates=updates + ) + + try: + self._put_dynamo_item( + item=_get_update_database_item( + namespace_item=namespace_item, + updated_properties=updated_properties, + ), + condition_expression=f"attribute_exists({DYNAMODB_COL_NAMESPACE})", + ) + except ConditionalCheckFailedException as e: + raise NoSuchNamespaceError(f"Database {database_name} does not exist") from e + + return properties_update_summary + + def _get_iceberg_table_item(self, database_name: str, table_name: str) -> Dict[str, Any]: + try: + return self._get_dynamo_item(identifier=f"{database_name}.{table_name}", namespace=database_name) + except ValueError as e: + raise NoSuchTableError(f"Table does not exist: {database_name}.{table_name}") from e + + def _get_iceberg_namespace_item(self, database_name: str) -> Dict[str, Any]: + try: + return self._get_dynamo_item(identifier=DYNAMODB_NAMESPACE, namespace=database_name) + except ValueError as e: + raise NoSuchNamespaceError(f"Namespace does not exist: {database_name}") from e + + def _ensure_namespace_exists(self, database_name: str) -> Dict[str, Any]: + return self._get_iceberg_namespace_item(database_name) + + def _get_dynamo_item(self, identifier: str, namespace: str) -> Dict[str, Any]: + try: + response = self.dynamodb.get_item( + TableName=self.dynamodb_table_name, + ConsistentRead=True, + Key={ + DYNAMODB_COL_IDENTIFIER: { + "S": identifier, + }, + DYNAMODB_COL_NAMESPACE: { + "S": namespace, + }, + }, + ) + if ITEM in response: + return response[ITEM] + else: + raise ValueError(f"Item not found. identifier: {identifier} - namespace: {namespace}") + except self.dynamodb.exceptions.ResourceNotFoundException as e: + raise ValueError(f"Item not found. 
identifier: {identifier} - namespace: {namespace}") from e + except ( + self.dynamodb.exceptions.ProvisionedThroughputExceededException, + self.dynamodb.exceptions.RequestLimitExceeded, + self.dynamodb.exceptions.InternalServerError, + ) as e: + raise GenericDynamoDbError(e.message) from e + + def _put_dynamo_item(self, item: Dict[str, Any], condition_expression: str) -> None: + try: + self.dynamodb.put_item(TableName=self.dynamodb_table_name, Item=item, ConditionExpression=condition_expression) + except self.dynamodb.exceptions.ConditionalCheckFailedException as e: + raise ConditionalCheckFailedException(f"Condition expression check failed: {condition_expression} - {item}") from e + except ( + self.dynamodb.exceptions.ProvisionedThroughputExceededException, + self.dynamodb.exceptions.RequestLimitExceeded, + self.dynamodb.exceptions.InternalServerError, + self.dynamodb.exceptions.ResourceNotFoundException, + self.dynamodb.exceptions.ItemCollectionSizeLimitExceededException, + self.dynamodb.exceptions.TransactionConflictException, + ) as e: + raise GenericDynamoDbError(e.message) from e + + def _delete_dynamo_item(self, namespace: str, identifier: str, condition_expression: str) -> None: + try: + self.dynamodb.delete_item( + TableName=self.dynamodb_table_name, + Key={ + DYNAMODB_COL_IDENTIFIER: { + "S": identifier, + }, + DYNAMODB_COL_NAMESPACE: { + "S": namespace, + }, + }, + ConditionExpression=condition_expression, + ) + except self.dynamodb.exceptions.ConditionalCheckFailedException as e: + raise ConditionalCheckFailedException( + f"Condition expression check failed: {condition_expression} - {identifier}" + ) from e + except ( + self.dynamodb.exceptions.ProvisionedThroughputExceededException, + self.dynamodb.exceptions.RequestLimitExceeded, + self.dynamodb.exceptions.InternalServerError, + self.dynamodb.exceptions.ResourceNotFoundException, + self.dynamodb.exceptions.ItemCollectionSizeLimitExceededException, + self.dynamodb.exceptions.TransactionConflictException, + ) as e: + raise GenericDynamoDbError(e.message) from e + + def _convert_dynamo_table_item_to_iceberg_table(self, dynamo_table_item: Dict[str, Any]) -> Table: + table_dict = _convert_dynamo_item_to_regular_dict(dynamo_table_item) + + for prop in [_add_property_prefix(prop) for prop in (TABLE_TYPE, METADATA_LOCATION)] + [ + DYNAMODB_COL_IDENTIFIER, + DYNAMODB_COL_NAMESPACE, + DYNAMODB_COL_CREATED_AT, + ]: + if prop not in table_dict.keys(): + raise NoSuchPropertyException(f"Iceberg required property {prop} is missing: {dynamo_table_item}") + + table_type = table_dict[_add_property_prefix(TABLE_TYPE)] + identifier = table_dict[DYNAMODB_COL_IDENTIFIER] + metadata_location = table_dict[_add_property_prefix(METADATA_LOCATION)] + database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError) + + if table_type.lower() != ICEBERG: + raise NoSuchIcebergTableError( + f"Property table_type is {table_type}, expected {ICEBERG}: " f"{database_name}.{table_name}" + ) + + io = load_file_io(properties=self.properties, location=metadata_location) + file = io.new_input(metadata_location) + metadata = FromInputFile.table_metadata(file) + return Table( + identifier=(self.name, database_name, table_name), + metadata=metadata, + metadata_location=metadata_location, + io=self._load_file_io(metadata.properties, metadata_location), + catalog=self, + ) + + +def _get_create_table_item(database_name: str, table_name: str, properties: Properties, metadata_location: str) -> Dict[str, Any]: + current_timestamp_ms = 
str(round(time() * 1000)) + _dict = { + DYNAMODB_COL_IDENTIFIER: { + "S": f"{database_name}.{table_name}", + }, + DYNAMODB_COL_NAMESPACE: { + "S": database_name, + }, + DYNAMODB_COL_VERSION: { + "S": str(uuid.uuid4()), + }, + DYNAMODB_COL_CREATED_AT: { + "N": current_timestamp_ms, + }, + DYNAMODB_COL_UPDATED_AT: { + "N": current_timestamp_ms, + }, + } + + for key, val in properties.items(): + _dict[_add_property_prefix(key)] = {"S": val} + + _dict[_add_property_prefix(TABLE_TYPE)] = {"S": ICEBERG.upper()} + _dict[_add_property_prefix(METADATA_LOCATION)] = {"S": metadata_location} + _dict[_add_property_prefix(PREVIOUS_METADATA_LOCATION)] = {"S": ""} + + return _dict + + +def _get_rename_table_item(from_dynamo_table_item: Dict[str, Any], to_database_name: str, to_table_name: str) -> Dict[str, Any]: + _dict = from_dynamo_table_item + current_timestamp_ms = str(round(time() * 1000)) + _dict[DYNAMODB_COL_IDENTIFIER]["S"] = f"{to_database_name}.{to_table_name}" + _dict[DYNAMODB_COL_NAMESPACE]["S"] = to_database_name + _dict[DYNAMODB_COL_VERSION]["S"] = str(uuid.uuid4()) + _dict[DYNAMODB_COL_UPDATED_AT]["N"] = current_timestamp_ms + return _dict + + +def _get_create_database_item(database_name: str, properties: Properties) -> Dict[str, Any]: + current_timestamp_ms = str(round(time() * 1000)) + _dict = { + DYNAMODB_COL_IDENTIFIER: { + "S": DYNAMODB_NAMESPACE, + }, + DYNAMODB_COL_NAMESPACE: { + "S": database_name, + }, + DYNAMODB_COL_VERSION: { + "S": str(uuid.uuid4()), + }, + DYNAMODB_COL_CREATED_AT: { + "N": current_timestamp_ms, + }, + DYNAMODB_COL_UPDATED_AT: { + "N": current_timestamp_ms, + }, + } + + for key, val in properties.items(): + _dict[_add_property_prefix(key)] = {"S": val} + + return _dict + + +def _get_update_database_item(namespace_item: Dict[str, Any], updated_properties: Properties) -> Dict[str, Any]: + current_timestamp_ms = str(round(time() * 1000)) + + _dict = { + DYNAMODB_COL_IDENTIFIER: namespace_item[DYNAMODB_COL_IDENTIFIER], + DYNAMODB_COL_NAMESPACE: namespace_item[DYNAMODB_COL_NAMESPACE], + DYNAMODB_COL_VERSION: { + "S": str(uuid.uuid4()), + }, + DYNAMODB_COL_CREATED_AT: namespace_item[DYNAMODB_COL_CREATED_AT], + DYNAMODB_COL_UPDATED_AT: { + "N": current_timestamp_ms, + }, + } + + for key, val in updated_properties.items(): + _dict[_add_property_prefix(key)] = {"S": val} + + return _dict + + +CREATE_CATALOG_ATTRIBUTE_DEFINITIONS = [ + { + "AttributeName": DYNAMODB_COL_IDENTIFIER, + "AttributeType": "S", + }, + { + "AttributeName": DYNAMODB_COL_NAMESPACE, + "AttributeType": "S", + }, +] + +CREATE_CATALOG_KEY_SCHEMA = [ + { + "AttributeName": DYNAMODB_COL_IDENTIFIER, + "KeyType": "HASH", + }, + { + "AttributeName": DYNAMODB_COL_NAMESPACE, + "KeyType": "RANGE", + }, +] + + +CREATE_CATALOG_GLOBAL_SECONDARY_INDEXES = [ + { + "IndexName": DYNAMODB_NAMESPACE_GSI, + "KeySchema": [ + { + "AttributeName": DYNAMODB_COL_NAMESPACE, + "KeyType": "HASH", + }, + { + "AttributeName": DYNAMODB_COL_IDENTIFIER, + "KeyType": "RANGE", + }, + ], + "Projection": { + "ProjectionType": "KEYS_ONLY", + }, + } +] + + +def _get_namespace_properties(namespace_dict: Dict[str, str]) -> Properties: + return {_remove_property_prefix(key): val for key, val in namespace_dict.items() if key.startswith(PROPERTY_KEY_PREFIX)} + + +def _convert_dynamo_item_to_regular_dict(dynamo_json: Dict[str, Any]) -> Dict[str, str]: + """Convert a dynamo json to a regular json. 
+
+ Example of a dynamo json:
+ {
+ "AlbumTitle": {
+ "S": "Songs About Life",
+ },
+ "Artist": {
+ "S": "Acme Band",
+ },
+ "SongTitle": {
+ "S": "Happy Day",
+ }
+ }
+
+ Converted to regular json:
+ {
+ "AlbumTitle": "Songs About Life",
+ "Artist": "Acme Band",
+ "SongTitle": "Happy Day"
+ }
+
+ Only "S" and "N" data types are supported since those are the only ones that Iceberg uses.
+ """
+ regular_json = {}
+ for column_name, val_dict in dynamo_json.items():
+ keys = list(val_dict.keys())
+
+ if len(keys) != 1:
+ raise ValueError(f"Expecting only 1 key: {keys}")
+
+ data_type = keys[0]
+ if data_type not in ("S", "N"):
+ raise ValueError("Only S and N data types are supported.")
+
+ values = list(val_dict.values())
+ assert len(values) == 1
+ column_value = values[0]
+ regular_json[column_name] = column_value
+
+ return regular_json
+
+
+def _add_property_prefix(prop: str) -> str:
+ return PROPERTY_KEY_PREFIX + prop
+
+
+def _remove_property_prefix(prop: str) -> str:
+ # str.lstrip strips a set of characters rather than a prefix, so slice the prefix off instead
+ return prop[len(PROPERTY_KEY_PREFIX):]
diff --git a/pyiceberg/catalog/glue.py b/pyiceberg/catalog/glue.py
new file mode 100644
index 0000000000..e0683632de
--- /dev/null
+++ b/pyiceberg/catalog/glue.py
@@ -0,0 +1,498 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
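Before the Glue catalog below, a short sketch of the DynamoDB row layout used by the catalog just added: fixed columns plus Iceberg properties under the "p." prefix, each value wrapped in a DynamoDB type tag ({"S": ...} or {"N": ...}). The identifiers and metadata location are illustrative placeholders, not values from this patch.

```python
# A sketch, not part of the patch; identifier and location are placeholders.
from pyiceberg.catalog.dynamodb import (
    _add_property_prefix,
    _convert_dynamo_item_to_regular_dict,
)

item = {
    "identifier": {"S": "default.taxis"},
    "namespace": {"S": "default"},
    "created_at": {"N": "1695974400000"},
    _add_property_prefix("table_type"): {"S": "ICEBERG"},
    _add_property_prefix("metadata_location"): {"S": "s3://bucket/00000-abc.metadata.json"},
}

# Strips the type tags, yielding a flat str-to-str dict; for namespace rows,
# _get_namespace_properties then keeps only the "p."-prefixed keys and
# removes the prefix.
print(_convert_dynamo_item_to_regular_dict(item))
```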
+ + +from typing import ( + Any, + List, + Optional, + Set, + Union, + cast, +) + +import boto3 +from mypy_boto3_glue.client import GlueClient +from mypy_boto3_glue.type_defs import ( + DatabaseInputTypeDef, + DatabaseTypeDef, + StorageDescriptorTypeDef, + TableInputTypeDef, + TableTypeDef, +) + +from pyiceberg.catalog import ( + EXTERNAL_TABLE, + ICEBERG, + LOCATION, + METADATA_LOCATION, + TABLE_TYPE, + Catalog, + Identifier, + Properties, + PropertiesUpdateSummary, +) +from pyiceberg.exceptions import ( + NamespaceAlreadyExistsError, + NamespaceNotEmptyError, + NoSuchIcebergTableError, + NoSuchNamespaceError, + NoSuchPropertyException, + NoSuchTableError, + TableAlreadyExistsError, +) +from pyiceberg.io import load_file_io +from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.serializers import FromInputFile +from pyiceberg.table import CommitTableRequest, CommitTableResponse, Table +from pyiceberg.table.metadata import new_table_metadata +from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder +from pyiceberg.typedef import EMPTY_DICT + + +def _construct_parameters(metadata_location: str) -> Properties: + return {TABLE_TYPE: ICEBERG.upper(), METADATA_LOCATION: metadata_location} + + +def _construct_create_table_input(table_name: str, metadata_location: str, properties: Properties) -> TableInputTypeDef: + table_input: TableInputTypeDef = { + "Name": table_name, + "TableType": EXTERNAL_TABLE, + "Parameters": _construct_parameters(metadata_location), + } + + if "Description" in properties: + table_input["Description"] = properties["Description"] + + return table_input + + +def _construct_rename_table_input(to_table_name: str, glue_table: TableTypeDef) -> TableInputTypeDef: + rename_table_input: TableInputTypeDef = {"Name": to_table_name} + # use the same Glue info to create the new table, pointing to the old metadata + assert glue_table["TableType"] + rename_table_input["TableType"] = glue_table["TableType"] + if "Owner" in glue_table: + rename_table_input["Owner"] = glue_table["Owner"] + + if "Parameters" in glue_table: + rename_table_input["Parameters"] = glue_table["Parameters"] + + if "StorageDescriptor" in glue_table: + # It turns out the output of StorageDescriptor is not the same as the input type + # because the Column can have a different type, but for now it seems to work, so + # silence the type error. 
+ rename_table_input["StorageDescriptor"] = cast(StorageDescriptorTypeDef, glue_table["StorageDescriptor"]) + + if "Description" in glue_table: + rename_table_input["Description"] = glue_table["Description"] + + return rename_table_input + + +def _construct_database_input(database_name: str, properties: Properties) -> DatabaseInputTypeDef: + database_input: DatabaseInputTypeDef = {"Name": database_name} + parameters = {} + for k, v in properties.items(): + if k == "Description": + database_input["Description"] = v + elif k == LOCATION: + database_input["LocationUri"] = v + else: + parameters[k] = v + database_input["Parameters"] = parameters + return database_input + + +class GlueCatalog(Catalog): + def __init__(self, name: str, **properties: Any): + super().__init__(name, **properties) + + session = boto3.Session( + profile_name=properties.get("profile_name"), + region_name=properties.get("region_name"), + botocore_session=properties.get("botocore_session"), + aws_access_key_id=properties.get("aws_access_key_id"), + aws_secret_access_key=properties.get("aws_secret_access_key"), + aws_session_token=properties.get("aws_session_token"), + ) + self.glue: GlueClient = session.client("glue") + + def _convert_glue_to_iceberg(self, glue_table: TableTypeDef) -> Table: + properties: Properties = glue_table["Parameters"] + + assert glue_table["DatabaseName"] + assert glue_table["Parameters"] + database_name = glue_table["DatabaseName"] + table_name = glue_table["Name"] + + if TABLE_TYPE not in properties: + raise NoSuchPropertyException( + f"Property {TABLE_TYPE} missing, could not determine type: {database_name}.{table_name}" + ) + glue_table_type = properties[TABLE_TYPE] + + if glue_table_type.lower() != ICEBERG: + raise NoSuchIcebergTableError( + f"Property table_type is {glue_table_type}, expected {ICEBERG}: {database_name}.{table_name}" + ) + + if METADATA_LOCATION not in properties: + raise NoSuchPropertyException( + f"Table property {METADATA_LOCATION} is missing, cannot find metadata for: {database_name}.{table_name}" + ) + metadata_location = properties[METADATA_LOCATION] + + io = load_file_io(properties=self.properties, location=metadata_location) + file = io.new_input(metadata_location) + metadata = FromInputFile.table_metadata(file) + return Table( + identifier=(self.name, database_name, table_name), + metadata=metadata, + metadata_location=metadata_location, + io=self._load_file_io(metadata.properties, metadata_location), + catalog=self, + ) + + def _create_glue_table(self, database_name: str, table_name: str, table_input: TableInputTypeDef) -> None: + try: + self.glue.create_table(DatabaseName=database_name, TableInput=table_input) + except self.glue.exceptions.AlreadyExistsException as e: + raise TableAlreadyExistsError(f"Table {database_name}.{table_name} already exists") from e + except self.glue.exceptions.EntityNotFoundException as e: + raise NoSuchNamespaceError(f"Database {database_name} does not exist") from e + + def create_table( + self, + identifier: Union[str, Identifier], + schema: Schema, + location: Optional[str] = None, + partition_spec: PartitionSpec = UNPARTITIONED_PARTITION_SPEC, + sort_order: SortOrder = UNSORTED_SORT_ORDER, + properties: Properties = EMPTY_DICT, + ) -> Table: + """ + Create an Iceberg table. + + Args: + identifier: Table identifier. + schema: Table's schema. + location: Location for the table. Optional Argument. + partition_spec: PartitionSpec for the table. + sort_order: SortOrder for the table. 
+            properties: Table properties that can be a string based dictionary.
+
+        Returns:
+            Table: the created table instance.
+
+        Raises:
+            AlreadyExistsError: If a table with the name already exists.
+            ValueError: If the identifier is invalid, or no path is given to store metadata.
+
+        """
+        database_name, table_name = self.identifier_to_database_and_table(identifier)
+
+        location = self._resolve_table_location(location, database_name, table_name)
+        metadata_location = self._get_metadata_location(location=location)
+        metadata = new_table_metadata(
+            location=location, schema=schema, partition_spec=partition_spec, sort_order=sort_order, properties=properties
+        )
+        io = load_file_io(properties=self.properties, location=metadata_location)
+        self._write_metadata(metadata, io, metadata_location)
+
+        table_input = _construct_create_table_input(table_name, metadata_location, properties)
+        self._create_glue_table(database_name=database_name, table_name=table_name, table_input=table_input)
+
+        return self.load_table(identifier=identifier)
+
+    def register_table(self, identifier: Union[str, Identifier], metadata_location: str) -> Table:
+        """Register a new table using existing metadata.
+
+        Args:
+            identifier Union[str, Identifier]: Table identifier for the table
+            metadata_location str: The location to the metadata
+
+        Returns:
+            Table: The newly registered table
+
+        Raises:
+            TableAlreadyExistsError: If the table already exists
+        """
+        raise NotImplementedError
+
+    def _commit_table(self, table_request: CommitTableRequest) -> CommitTableResponse:
+        """Update the table.
+
+        Args:
+            table_request (CommitTableRequest): The table requests to be carried out.
+
+        Returns:
+            CommitTableResponse: The updated metadata.
+
+        Raises:
+            NoSuchTableError: If a table with the given identifier does not exist.
+        """
+        raise NotImplementedError
+
+    def load_table(self, identifier: Union[str, Identifier]) -> Table:
+        """Load the table's metadata and return the table instance.
+
+        You can also use this method to check for table existence using 'try catalog.table() except TableNotFoundError'.
+        Note: This method doesn't scan data stored in the table.
+
+        Args:
+            identifier: Table identifier.
+
+        Returns:
+            Table: the table instance with its metadata.
+
+        Raises:
+            NoSuchTableError: If a table with the name does not exist, or the identifier is invalid.
+        """
+        database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError)
+        try:
+            load_table_response = self.glue.get_table(DatabaseName=database_name, Name=table_name)
+        except self.glue.exceptions.EntityNotFoundException as e:
+            raise NoSuchTableError(f"Table does not exist: {database_name}.{table_name}") from e
+
+        return self._convert_glue_to_iceberg(load_table_response["Table"])
+
+    def drop_table(self, identifier: Union[str, Identifier]) -> None:
+        """Drop a table.
+
+        Args:
+            identifier: Table identifier.
+
+        Raises:
+            NoSuchTableError: If a table with the name does not exist, or the identifier is invalid.
+ """ + database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError) + try: + self.glue.delete_table(DatabaseName=database_name, Name=table_name) + except self.glue.exceptions.EntityNotFoundException as e: + raise NoSuchTableError(f"Table does not exist: {database_name}.{table_name}") from e + + def rename_table(self, from_identifier: Union[str, Identifier], to_identifier: Union[str, Identifier]) -> Table: + """Rename a fully classified table name. + + This method can only rename Iceberg tables in AWS Glue. + + Args: + from_identifier: Existing table identifier. + to_identifier: New table identifier. + + Returns: + Table: the updated table instance with its metadata. + + Raises: + ValueError: When from table identifier is invalid. + NoSuchTableError: When a table with the name does not exist. + NoSuchIcebergTableError: When from table is not a valid iceberg table. + NoSuchPropertyException: When from table miss some required properties. + NoSuchNamespaceError: When the destination namespace doesn't exist. + """ + from_database_name, from_table_name = self.identifier_to_database_and_table(from_identifier, NoSuchTableError) + to_database_name, to_table_name = self.identifier_to_database_and_table(to_identifier) + try: + get_table_response = self.glue.get_table(DatabaseName=from_database_name, Name=from_table_name) + except self.glue.exceptions.EntityNotFoundException as e: + raise NoSuchTableError(f"Table does not exist: {from_database_name}.{from_table_name}") from e + + glue_table = get_table_response["Table"] + + try: + # verify that from_identifier is a valid iceberg table + self._convert_glue_to_iceberg(glue_table=glue_table) + except NoSuchPropertyException as e: + raise NoSuchPropertyException( + f"Failed to rename table {from_database_name}.{from_table_name} since it is missing required properties" + ) from e + except NoSuchIcebergTableError as e: + raise NoSuchIcebergTableError( + f"Failed to rename table {from_database_name}.{from_table_name} since it is not a valid iceberg table" + ) from e + + rename_table_input = _construct_rename_table_input(to_table_name=to_table_name, glue_table=glue_table) + self._create_glue_table(database_name=to_database_name, table_name=to_table_name, table_input=rename_table_input) + + try: + self.drop_table(from_identifier) + except Exception as e: + log_message = f"Failed to drop old table {from_database_name}.{from_table_name}. " + + try: + self.drop_table(to_identifier) + log_message += f"Rolled back table creation for {to_database_name}.{to_table_name}." + except NoSuchTableError: + log_message += ( + f"Failed to roll back table creation for {to_database_name}.{to_table_name}. " f"Please clean up manually" + ) + + raise ValueError(log_message) from e + + return self.load_table(to_identifier) + + def create_namespace(self, namespace: Union[str, Identifier], properties: Properties = EMPTY_DICT) -> None: + """Create a namespace in the catalog. + + Args: + namespace: Namespace identifier. + properties: A string dictionary of properties for the given namespace. + + Raises: + ValueError: If the identifier is invalid. + AlreadyExistsError: If a namespace with the given name already exists. 
+ """ + database_name = self.identifier_to_database(namespace) + try: + self.glue.create_database(DatabaseInput=_construct_database_input(database_name, properties)) + except self.glue.exceptions.AlreadyExistsException as e: + raise NamespaceAlreadyExistsError(f"Database {database_name} already exists") from e + + def drop_namespace(self, namespace: Union[str, Identifier]) -> None: + """Drop a namespace. + + A Glue namespace can only be dropped if it is empty. + + Args: + namespace: Namespace identifier. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist, or the identifier is invalid. + NamespaceNotEmptyError: If the namespace is not empty. + """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + try: + table_list = self.list_tables(namespace=database_name) + except NoSuchNamespaceError as e: + raise NoSuchNamespaceError(f"Database does not exist: {database_name}") from e + + if len(table_list) > 0: + raise NamespaceNotEmptyError(f"Database {database_name} is not empty") + + self.glue.delete_database(Name=database_name) + + def list_tables(self, namespace: Union[str, Identifier]) -> List[Identifier]: + """List tables under the given namespace in the catalog (including non-Iceberg tables). + + Args: + namespace (str | Identifier): Namespace identifier to search. + + Returns: + List[Identifier]: list of table identifiers. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist, or the identifier is invalid. + """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + table_list: List[TableTypeDef] = [] + next_token: Optional[str] = None + try: + table_list_response = self.glue.get_tables(DatabaseName=database_name) + while True: + table_list_response = ( + self.glue.get_tables(DatabaseName=database_name) + if not next_token + else self.glue.get_tables(DatabaseName=database_name, NextToken=next_token) + ) + table_list.extend(table_list_response["TableList"]) + next_token = table_list_response.get("NextToken") + if not next_token: + break + + except self.glue.exceptions.EntityNotFoundException as e: + raise NoSuchNamespaceError(f"Database does not exist: {database_name}") from e + return [(database_name, table["Name"]) for table in table_list] + + def list_namespaces(self, namespace: Union[str, Identifier] = ()) -> List[Identifier]: + """List namespaces from the given namespace. If not given, list top-level namespaces from the catalog. + + Returns: + List[Identifier]: a List of namespace identifiers. + """ + # Hierarchical namespace is not supported. Return an empty list + if namespace: + return [] + + database_list: List[DatabaseTypeDef] = [] + databases_response = self.glue.get_databases() + next_token: Optional[str] = None + + while True: + databases_response = self.glue.get_databases() if not next_token else self.glue.get_databases(NextToken=next_token) + database_list.extend(databases_response["DatabaseList"]) + next_token = databases_response.get("NextToken") + if not next_token: + break + + return [self.identifier_to_tuple(database["Name"]) for database in database_list] + + def load_namespace_properties(self, namespace: Union[str, Identifier]) -> Properties: + """Get properties for a namespace. + + Args: + namespace: Namespace identifier. + + Returns: + Properties: Properties for the given namespace. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist, or identifier is invalid. 
+ """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + try: + database_response = self.glue.get_database(Name=database_name) + except self.glue.exceptions.EntityNotFoundException as e: + raise NoSuchNamespaceError(f"Database does not exist: {database_name}") from e + except self.glue.exceptions.InvalidInputException as e: + raise NoSuchNamespaceError(f"Invalid input for namespace {database_name}") from e + + database = database_response["Database"] + + properties = dict(database.get("Parameters", {})) + if "LocationUri" in database: + properties["location"] = database["LocationUri"] + if "Description" in database: + properties["Description"] = database["Description"] + + return properties + + def update_namespace_properties( + self, namespace: Union[str, Identifier], removals: Optional[Set[str]] = None, updates: Properties = EMPTY_DICT + ) -> PropertiesUpdateSummary: + """Remove provided property keys and updates properties for a namespace. + + Args: + namespace: Namespace identifier. + removals: Set of property keys that need to be removed. Optional Argument. + updates: Properties to be updated for the given namespace. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist, or identifier is invalid. + ValueError: If removals and updates have overlapping keys. + """ + current_properties = self.load_namespace_properties(namespace=namespace) + properties_update_summary, updated_properties = self._get_updated_props_and_update_summary( + current_properties=current_properties, removals=removals, updates=updates + ) + + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + self.glue.update_database(Name=database_name, DatabaseInput=_construct_database_input(database_name, updated_properties)) + + return properties_update_summary diff --git a/pyiceberg/catalog/hive.py b/pyiceberg/catalog/hive.py new file mode 100644 index 0000000000..21f171421e --- /dev/null +++ b/pyiceberg/catalog/hive.py @@ -0,0 +1,544 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+import getpass
+import time
+from types import TracebackType
+from typing import (
+    Any,
+    Dict,
+    List,
+    Optional,
+    Set,
+    Type,
+    Union,
+)
+from urllib.parse import urlparse
+
+from hive_metastore.ThriftHiveMetastore import Client
+from hive_metastore.ttypes import (
+    AlreadyExistsException,
+    FieldSchema,
+    InvalidOperationException,
+    MetaException,
+    NoSuchObjectException,
+    SerDeInfo,
+    StorageDescriptor,
+)
+from hive_metastore.ttypes import Database as HiveDatabase
+from hive_metastore.ttypes import Table as HiveTable
+from thrift.protocol import TBinaryProtocol
+from thrift.transport import TSocket, TTransport
+
+from pyiceberg.catalog import (
+    EXTERNAL_TABLE,
+    ICEBERG,
+    LOCATION,
+    METADATA_LOCATION,
+    TABLE_TYPE,
+    Catalog,
+    Identifier,
+    Properties,
+    PropertiesUpdateSummary,
+)
+from pyiceberg.exceptions import (
+    NamespaceAlreadyExistsError,
+    NamespaceNotEmptyError,
+    NoSuchIcebergTableError,
+    NoSuchNamespaceError,
+    NoSuchTableError,
+    TableAlreadyExistsError,
+)
+from pyiceberg.io import FileIO, load_file_io
+from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionSpec
+from pyiceberg.schema import Schema, SchemaVisitor, visit
+from pyiceberg.serializers import FromInputFile
+from pyiceberg.table import CommitTableRequest, CommitTableResponse, Table
+from pyiceberg.table.metadata import new_table_metadata
+from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder
+from pyiceberg.typedef import EMPTY_DICT
+from pyiceberg.types import (
+    BinaryType,
+    BooleanType,
+    DateType,
+    DecimalType,
+    DoubleType,
+    FixedType,
+    FloatType,
+    IntegerType,
+    ListType,
+    LongType,
+    MapType,
+    NestedField,
+    PrimitiveType,
+    StringType,
+    StructType,
+    TimestampType,
+    TimeType,
+    UUIDType,
+)
+
+COMMENT = "comment"
+OWNER = "owner"
+
+
+class _HiveClient:
+    """Helper class to nicely open and close the transport."""
+
+    _transport: TTransport
+    _client: Client
+
+    def __init__(self, uri: str):
+        url_parts = urlparse(uri)
+        transport = TSocket.TSocket(url_parts.hostname, url_parts.port)
+        self._transport = TTransport.TBufferedTransport(transport)
+        # Route the protocol through the buffered transport, not the raw socket
+        protocol = TBinaryProtocol.TBinaryProtocol(self._transport)
+
+        self._client = Client(protocol)
+
+    def __enter__(self) -> Client:
+        self._transport.open()
+        return self._client
+
+    def __exit__(
+        self, exctype: Optional[Type[BaseException]], excinst: Optional[BaseException], exctb: Optional[TracebackType]
+    ) -> None:
+        self._transport.close()
+
+
+def _construct_hive_storage_descriptor(schema: Schema, location: Optional[str]) -> StorageDescriptor:
+    ser_de_info = SerDeInfo(serializationLib="org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")
+    return StorageDescriptor(
+        [FieldSchema(field.name, visit(field.field_type, SchemaToHiveConverter()), field.doc) for field in schema.fields],
+        location,
+        "org.apache.hadoop.mapred.FileInputFormat",
+        "org.apache.hadoop.mapred.FileOutputFormat",
+        serdeInfo=ser_de_info,
+    )
+
+
+PROP_EXTERNAL = "EXTERNAL"
+PROP_TABLE_TYPE = "table_type"
+PROP_METADATA_LOCATION = "metadata_location"
+PROP_PREVIOUS_METADATA_LOCATION = "previous_metadata_location"
+
+
+def _construct_parameters(metadata_location: str, previous_metadata_location: Optional[str] = None)
-> Dict[str, Any]: + properties = {PROP_EXTERNAL: "TRUE", PROP_TABLE_TYPE: "ICEBERG", PROP_METADATA_LOCATION: metadata_location} + if previous_metadata_location: + properties[PROP_PREVIOUS_METADATA_LOCATION] = previous_metadata_location + + return properties + + +def _annotate_namespace(database: HiveDatabase, properties: Properties) -> HiveDatabase: + params = {} + for key, value in properties.items(): + if key == COMMENT: + database.description = value + elif key == LOCATION: + database.locationUri = value + else: + params[key] = value + database.parameters = params + return database + + +HIVE_PRIMITIVE_TYPES = { + BooleanType: "boolean", + IntegerType: "int", + LongType: "bigint", + FloatType: "float", + DoubleType: "double", + DateType: "date", + TimeType: "string", + TimestampType: "timestamp", + StringType: "string", + UUIDType: "string", + BinaryType: "binary", + FixedType: "binary", +} + + +class SchemaToHiveConverter(SchemaVisitor[str]): + def schema(self, schema: Schema, struct_result: str) -> str: + return struct_result + + def struct(self, struct: StructType, field_results: List[str]) -> str: + return f"struct<{','.join(field_results)}>" + + def field(self, field: NestedField, field_result: str) -> str: + return f"{field.name}:{field_result}" + + def list(self, list_type: ListType, element_result: str) -> str: + return f"array<{element_result}>" + + def map(self, map_type: MapType, key_result: str, value_result: str) -> str: + # Key has to be primitive for Hive + return f"map<{key_result},{value_result}>" + + def primitive(self, primitive: PrimitiveType) -> str: + if isinstance(primitive, DecimalType): + return f"decimal({primitive.precision},{primitive.scale})" + else: + return HIVE_PRIMITIVE_TYPES[type(primitive)] + + +class HiveCatalog(Catalog): + _client: _HiveClient + + def __init__(self, name: str, **properties: str): + super().__init__(name, **properties) + self._client = _HiveClient(properties["uri"]) + + def _convert_hive_into_iceberg(self, table: HiveTable, io: FileIO) -> Table: + properties: Dict[str, str] = table.parameters + if TABLE_TYPE not in properties: + raise NoSuchTableError(f"Property table_type missing, could not determine type: {table.dbName}.{table.tableName}") + + table_type = properties[TABLE_TYPE] + if table_type.lower() != ICEBERG: + raise NoSuchIcebergTableError( + f"Property table_type is {table_type}, expected {ICEBERG}: {table.dbName}.{table.tableName}" + ) + + if prop_metadata_location := properties.get(METADATA_LOCATION): + metadata_location = prop_metadata_location + else: + raise NoSuchTableError(f"Table property {METADATA_LOCATION} is missing") + + file = io.new_input(metadata_location) + metadata = FromInputFile.table_metadata(file) + return Table( + identifier=(self.name, table.dbName, table.tableName), + metadata=metadata, + metadata_location=metadata_location, + io=self._load_file_io(metadata.properties, metadata_location), + catalog=self, + ) + + def create_table( + self, + identifier: Union[str, Identifier], + schema: Schema, + location: Optional[str] = None, + partition_spec: PartitionSpec = UNPARTITIONED_PARTITION_SPEC, + sort_order: SortOrder = UNSORTED_SORT_ORDER, + properties: Properties = EMPTY_DICT, + ) -> Table: + """Create a table. + + Args: + identifier: Table identifier. + schema: Table's schema. + location: Location for the table. Optional Argument. + partition_spec: PartitionSpec for the table. + sort_order: SortOrder for the table. + properties: Table properties that can be a string based dictionary. 
+
+        Returns:
+            Table: the created table instance.
+
+        Raises:
+            AlreadyExistsError: If a table with the name already exists.
+            ValueError: If the identifier is invalid.
+        """
+        database_name, table_name = self.identifier_to_database_and_table(identifier)
+        current_time_millis = int(time.time() * 1000)
+
+        location = self._resolve_table_location(location, database_name, table_name)
+
+        metadata_location = self._get_metadata_location(location=location)
+        metadata = new_table_metadata(
+            location=location, schema=schema, partition_spec=partition_spec, sort_order=sort_order, properties=properties
+        )
+        io = load_file_io({**self.properties, **properties}, location=location)
+        self._write_metadata(metadata, io, metadata_location)
+
+        tbl = HiveTable(
+            dbName=database_name,
+            tableName=table_name,
+            owner=properties[OWNER] if properties and OWNER in properties else getpass.getuser(),
+            createTime=current_time_millis // 1000,
+            lastAccessTime=current_time_millis // 1000,
+            sd=_construct_hive_storage_descriptor(schema, location),
+            tableType=EXTERNAL_TABLE,
+            parameters=_construct_parameters(metadata_location),
+        )
+        try:
+            with self._client as open_client:
+                open_client.create_table(tbl)
+                hive_table = open_client.get_table(dbname=database_name, tbl_name=table_name)
+        except AlreadyExistsException as e:
+            raise TableAlreadyExistsError(f"Table {database_name}.{table_name} already exists") from e
+
+        return self._convert_hive_into_iceberg(hive_table, io)
+
+    def register_table(self, identifier: Union[str, Identifier], metadata_location: str) -> Table:
+        """Register a new table using existing metadata.
+
+        Args:
+            identifier Union[str, Identifier]: Table identifier for the table
+            metadata_location str: The location to the metadata
+
+        Returns:
+            Table: The newly registered table
+
+        Raises:
+            TableAlreadyExistsError: If the table already exists
+        """
+        raise NotImplementedError
+
+    def _commit_table(self, table_request: CommitTableRequest) -> CommitTableResponse:
+        """Update the table.
+
+        Args:
+            table_request (CommitTableRequest): The table requests to be carried out.
+
+        Returns:
+            CommitTableResponse: The updated metadata.
+
+        Raises:
+            NoSuchTableError: If a table with the given identifier does not exist.
+        """
+        raise NotImplementedError
+
+    def load_table(self, identifier: Union[str, Identifier]) -> Table:
+        """Load the table's metadata and return the table instance.
+
+        You can also use this method to check for table existence using 'try catalog.table() except TableNotFoundError'.
+        Note: This method doesn't scan data stored in the table.
+
+        Args:
+            identifier: Table identifier.
+
+        Returns:
+            Table: the table instance with its metadata.
+
+        Raises:
+            NoSuchTableError: If a table with the name does not exist, or the identifier is invalid.
+        """
+        database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError)
+        try:
+            with self._client as open_client:
+                hive_table = open_client.get_table(dbname=database_name, tbl_name=table_name)
+        except NoSuchObjectException as e:
+            raise NoSuchTableError(f"Table does not exist: {table_name}") from e
+
+        io = load_file_io({**self.properties, **hive_table.parameters}, hive_table.sd.location)
+        return self._convert_hive_into_iceberg(hive_table, io)
+
+    def drop_table(self, identifier: Union[str, Identifier]) -> None:
+        """Drop a table.
+
+        Args:
+            identifier: Table identifier.
+
+        Raises:
+            NoSuchTableError: If a table with the name does not exist, or the identifier is invalid.
+ """ + database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError) + try: + with self._client as open_client: + open_client.drop_table(dbname=database_name, name=table_name, deleteData=False) + except NoSuchObjectException as e: + # When the namespace doesn't exist, it throws the same error + raise NoSuchTableError(f"Table does not exists: {table_name}") from e + + def purge_table(self, identifier: Union[str, Identifier]) -> None: + # This requires to traverse the reachability set, and drop all the data files. + raise NotImplementedError("Not yet implemented") + + def rename_table(self, from_identifier: Union[str, Identifier], to_identifier: Union[str, Identifier]) -> Table: + """Rename a fully classified table name. + + Args: + from_identifier: Existing table identifier. + to_identifier: New table identifier. + + Returns: + Table: the updated table instance with its metadata. + + Raises: + ValueError: When from table identifier is invalid. + NoSuchTableError: When a table with the name does not exist. + NoSuchNamespaceError: When the destination namespace doesn't exist. + """ + from_database_name, from_table_name = self.identifier_to_database_and_table(from_identifier, NoSuchTableError) + to_database_name, to_table_name = self.identifier_to_database_and_table(to_identifier) + try: + with self._client as open_client: + tbl = open_client.get_table(dbname=from_database_name, tbl_name=from_table_name) + tbl.dbName = to_database_name + tbl.tableName = to_table_name + open_client.alter_table(dbname=from_database_name, tbl_name=from_table_name, new_tbl=tbl) + except NoSuchObjectException as e: + raise NoSuchTableError(f"Table does not exist: {from_table_name}") from e + except InvalidOperationException as e: + raise NoSuchNamespaceError(f"Database does not exists: {to_database_name}") from e + return self.load_table(to_identifier) + + def create_namespace(self, namespace: Union[str, Identifier], properties: Properties = EMPTY_DICT) -> None: + """Create a namespace in the catalog. + + Args: + namespace: Namespace identifier. + properties: A string dictionary of properties for the given namespace. + + Raises: + ValueError: If the identifier is invalid. + AlreadyExistsError: If a namespace with the given name already exists. + """ + database_name = self.identifier_to_database(namespace) + hive_database = HiveDatabase(name=database_name, parameters=properties) + + try: + with self._client as open_client: + open_client.create_database(_annotate_namespace(hive_database, properties)) + except AlreadyExistsException as e: + raise NamespaceAlreadyExistsError(f"Database {database_name} already exists") from e + + def drop_namespace(self, namespace: Union[str, Identifier]) -> None: + """Drop a namespace. + + Args: + namespace: Namespace identifier. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist, or the identifier is invalid. + NamespaceNotEmptyError: If the namespace is not empty. 
+ """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + try: + with self._client as open_client: + open_client.drop_database(database_name, deleteData=False, cascade=False) + except InvalidOperationException as e: + raise NamespaceNotEmptyError(f"Database {database_name} is not empty") from e + except MetaException as e: + raise NoSuchNamespaceError(f"Database does not exists: {database_name}") from e + + def list_tables(self, namespace: Union[str, Identifier]) -> List[Identifier]: + """List tables under the given namespace in the catalog (including non-Iceberg tables). + + When the database doesn't exist, it will just return an empty list. + + Args: + namespace: Database to list. + + Returns: + List[Identifier]: list of table identifiers. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist, or the identifier is invalid. + """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + with self._client as open_client: + return [(database_name, table_name) for table_name in open_client.get_all_tables(db_name=database_name)] + + def list_namespaces(self, namespace: Union[str, Identifier] = ()) -> List[Identifier]: + """List namespaces from the given namespace. If not given, list top-level namespaces from the catalog. + + Returns: + List[Identifier]: a List of namespace identifiers. + """ + # Hierarchical namespace is not supported. Return an empty list + if namespace: + return [] + + with self._client as open_client: + return list(map(self.identifier_to_tuple, open_client.get_all_databases())) + + def load_namespace_properties(self, namespace: Union[str, Identifier]) -> Properties: + """Get properties for a namespace. + + Args: + namespace: Namespace identifier. + + Returns: + Properties: Properties for the given namespace. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist, or identifier is invalid. + """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + try: + with self._client as open_client: + database = open_client.get_database(name=database_name) + properties = database.parameters + properties[LOCATION] = database.locationUri + if comment := database.description: + properties[COMMENT] = comment + return properties + except NoSuchObjectException as e: + raise NoSuchNamespaceError(f"Database does not exists: {database_name}") from e + + def update_namespace_properties( + self, namespace: Union[str, Identifier], removals: Optional[Set[str]] = None, updates: Properties = EMPTY_DICT + ) -> PropertiesUpdateSummary: + """Remove provided property keys and update properties for a namespace. + + Args: + namespace: Namespace identifier. + removals: Set of property keys that need to be removed. Optional Argument. + updates: Properties to be updated for the given namespace. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist + ValueError: If removals and updates have overlapping keys. 
+ """ + self._check_for_overlap(updates=updates, removals=removals) + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + with self._client as open_client: + try: + database = open_client.get_database(database_name) + parameters = database.parameters + except NoSuchObjectException as e: + raise NoSuchNamespaceError(f"Database does not exists: {database_name}") from e + + removed: Set[str] = set() + updated: Set[str] = set() + + if removals: + for key in removals: + if key in parameters: + parameters[key] = None + removed.add(key) + if updates: + for key, value in updates.items(): + parameters[key] = value + updated.add(key) + + open_client.alter_database(database_name, _annotate_namespace(database, parameters)) + + expected_to_change = (removals or set()).difference(removed) + + return PropertiesUpdateSummary(removed=list(removed or []), updated=list(updated or []), missing=list(expected_to_change)) diff --git a/pyiceberg/catalog/noop.py b/pyiceberg/catalog/noop.py new file mode 100644 index 0000000000..083f851d1c --- /dev/null +++ b/pyiceberg/catalog/noop.py @@ -0,0 +1,94 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from typing import ( + List, + Optional, + Set, + Union, +) + +from pyiceberg.catalog import Catalog, PropertiesUpdateSummary +from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.table import ( + CommitTableRequest, + CommitTableResponse, + SortOrder, + Table, +) +from pyiceberg.table.sorting import UNSORTED_SORT_ORDER +from pyiceberg.typedef import EMPTY_DICT, Identifier, Properties + + +class NoopCatalog(Catalog): + def create_table( + self, + identifier: Union[str, Identifier], + schema: Schema, + location: Optional[str] = None, + partition_spec: PartitionSpec = UNPARTITIONED_PARTITION_SPEC, + sort_order: SortOrder = UNSORTED_SORT_ORDER, + properties: Properties = EMPTY_DICT, + ) -> Table: + raise NotImplementedError + + def load_table(self, identifier: Union[str, Identifier]) -> Table: + raise NotImplementedError + + def register_table(self, identifier: Union[str, Identifier], metadata_location: str) -> Table: + """Register a new table using existing metadata. 
+ + Args: + identifier Union[str, Identifier]: Table identifier for the table + metadata_location str: The location to the metadata + + Returns: + Table: The newly registered table + + Raises: + TableAlreadyExistsError: If the table already exists + """ + raise NotImplementedError + + def drop_table(self, identifier: Union[str, Identifier]) -> None: + raise NotImplementedError + + def rename_table(self, from_identifier: Union[str, Identifier], to_identifier: Union[str, Identifier]) -> Table: + raise NotImplementedError + + def _commit_table(self, table_request: CommitTableRequest) -> CommitTableResponse: + raise NotImplementedError + + def create_namespace(self, namespace: Union[str, Identifier], properties: Properties = EMPTY_DICT) -> None: + raise NotImplementedError + + def drop_namespace(self, namespace: Union[str, Identifier]) -> None: + raise NotImplementedError + + def list_tables(self, namespace: Union[str, Identifier]) -> List[Identifier]: + raise NotImplementedError + + def list_namespaces(self, namespace: Union[str, Identifier] = ()) -> List[Identifier]: + raise NotImplementedError + + def load_namespace_properties(self, namespace: Union[str, Identifier]) -> Properties: + raise NotImplementedError + + def update_namespace_properties( + self, namespace: Union[str, Identifier], removals: Optional[Set[str]] = None, updates: Properties = EMPTY_DICT + ) -> PropertiesUpdateSummary: + raise NotImplementedError diff --git a/pyiceberg/catalog/rest.py b/pyiceberg/catalog/rest.py new file mode 100644 index 0000000000..0023e18984 --- /dev/null +++ b/pyiceberg/catalog/rest.py @@ -0,0 +1,625 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
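The REST implementation that follows resolves its configuration from the server's /v1/config endpoint at construction time and, when given a credential instead of a token, performs a client-credentials OAuth exchange before issuing any other request. A brief usage sketch, with a hypothetical endpoint and credential, assuming a reachable REST catalog server:

    from pyiceberg.catalog.rest import RestCatalog

    # Hypothetical server and client credentials; "credential" is split on ":"
    # into client_id and client_secret and exchanged for a bearer token.
    catalog = RestCatalog(
        "rest",
        **{
            "uri": "https://rest-catalog.example.com",
            "credential": "my-client-id:my-client-secret",
        },
    )

    print(catalog.list_namespaces())
    table = catalog.load_table("analytics.events")  # hypothetical table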
+from json import JSONDecodeError +from typing import ( + Any, + Dict, + List, + Literal, + Optional, + Set, + Tuple, + Type, + Union, +) + +from pydantic import Field, ValidationError +from requests import HTTPError, Session + +from pyiceberg import __version__ +from pyiceberg.catalog import ( + TOKEN, + URI, + WAREHOUSE_LOCATION, + Catalog, + Identifier, + Properties, + PropertiesUpdateSummary, +) +from pyiceberg.exceptions import ( + AuthorizationExpiredError, + BadRequestError, + CommitFailedException, + CommitStateUnknownException, + ForbiddenError, + NamespaceAlreadyExistsError, + NoSuchNamespaceError, + NoSuchTableError, + OAuthError, + RESTError, + ServerError, + ServiceUnavailableError, + TableAlreadyExistsError, + UnauthorizedError, +) +from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.table import ( + CommitTableRequest, + CommitTableResponse, + Table, + TableMetadata, +) +from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder +from pyiceberg.typedef import EMPTY_DICT, IcebergBaseModel + +ICEBERG_REST_SPEC_VERSION = "0.14.1" + + +class Endpoints: + get_config: str = "config" + list_namespaces: str = "namespaces" + create_namespace: str = "namespaces" + load_namespace_metadata: str = "namespaces/{namespace}" + drop_namespace: str = "namespaces/{namespace}" + update_namespace_properties: str = "namespaces/{namespace}/properties" + list_tables: str = "namespaces/{namespace}/tables" + create_table: str = "namespaces/{namespace}/tables" + register_table = "namespaces/{namespace}/register" + load_table: str = "namespaces/{namespace}/tables/{table}" + update_table: str = "namespaces/{namespace}/tables/{table}" + drop_table: str = "namespaces/{namespace}/tables/{table}?purgeRequested={purge}" + table_exists: str = "namespaces/{namespace}/tables/{table}" + get_token: str = "oauth/tokens" + rename_table: str = "tables/rename" + + +AUTHORIZATION_HEADER = "Authorization" +BEARER_PREFIX = "Bearer" +CATALOG_SCOPE = "catalog" +CLIENT_ID = "client_id" +PREFIX = "prefix" +CLIENT_SECRET = "client_secret" +CLIENT_CREDENTIALS = "client_credentials" +CREDENTIAL = "credential" +GRANT_TYPE = "grant_type" +SCOPE = "scope" +TOKEN_EXCHANGE = "urn:ietf:params:oauth:grant-type:token-exchange" +SEMICOLON = ":" +KEY = "key" +CERT = "cert" +CLIENT = "client" +CA_BUNDLE = "cabundle" +SSL = "ssl" +SIGV4 = "rest.sigv4-enabled" +SIGV4_REGION = "rest.signing-region" +SIGV4_SERVICE = "rest.signing-name" + +NAMESPACE_SEPARATOR = b"\x1F".decode("UTF-8") + + +class TableResponse(IcebergBaseModel): + metadata_location: str = Field(alias="metadata-location") + metadata: TableMetadata + config: Properties = Field(default_factory=dict) + + +class CreateTableRequest(IcebergBaseModel): + name: str = Field() + location: Optional[str] = Field() + table_schema: Schema = Field(alias="schema") + partition_spec: Optional[PartitionSpec] = Field(alias="partition-spec") + write_order: Optional[SortOrder] = Field(alias="write-order") + stage_create: bool = Field(alias="stage-create", default=False) + properties: Properties = Field(default_factory=dict) + + +class RegisterTableRequest(IcebergBaseModel): + name: str + metadata_location: str = Field(..., alias="metadata-location") + + +class TokenResponse(IcebergBaseModel): + access_token: str = Field() + token_type: str = Field() + expires_in: int = Field() + issued_token_type: str = Field() + + +class ConfigResponse(IcebergBaseModel): + defaults: Properties = Field() + overrides: Properties = 
Field()
+
+
+class ListNamespaceResponse(IcebergBaseModel):
+    namespaces: List[Identifier] = Field()
+
+
+class NamespaceResponse(IcebergBaseModel):
+    namespace: Identifier = Field()
+    properties: Properties = Field()
+
+
+class UpdateNamespacePropertiesResponse(IcebergBaseModel):
+    removed: List[str] = Field()
+    updated: List[str] = Field()
+    missing: List[str] = Field()
+
+
+class ListTableResponseEntry(IcebergBaseModel):
+    name: str = Field()
+    namespace: Identifier = Field()
+
+
+class ListTablesResponse(IcebergBaseModel):
+    identifiers: List[ListTableResponseEntry] = Field()
+
+
+class ErrorResponseMessage(IcebergBaseModel):
+    message: str = Field()
+    type: str = Field()
+    code: int = Field()
+
+
+class ErrorResponse(IcebergBaseModel):
+    error: ErrorResponseMessage = Field()
+
+
+class OAuthErrorResponse(IcebergBaseModel):
+    error: Literal[
+        "invalid_request", "invalid_client", "invalid_grant", "unauthorized_client", "unsupported_grant_type", "invalid_scope"
+    ]
+    error_description: Optional[str] = None
+    error_uri: Optional[str] = None
+
+
+class RestCatalog(Catalog):
+    uri: str
+    _session: Session
+
+    def __init__(self, name: str, **properties: str):
+        """Rest Catalog.
+
+        You either need to provide a client_id and client_secret, or an already valid token.
+
+        Args:
+            name: Name to identify the catalog.
+            properties: Properties that are passed along to the configuration.
+        """
+        super().__init__(name, **properties)
+        self.uri = properties[URI]
+        self._fetch_config()
+        self._session = self._create_session()
+
+    def _create_session(self) -> Session:
+        """Create a request session with provided catalog configuration."""
+        session = Session()
+
+        # Sets the client side and server side SSL cert verification, if provided as properties.
+        if ssl_config := self.properties.get(SSL):
+            if ssl_ca_bundle := ssl_config.get(CA_BUNDLE):  # type: ignore
+                session.verify = ssl_ca_bundle
+            if ssl_client := ssl_config.get(CLIENT):  # type: ignore
+                if all(k in ssl_client for k in (CERT, KEY)):
+                    session.cert = (ssl_client[CERT], ssl_client[KEY])
+                elif ssl_client_cert := ssl_client.get(CERT):
+                    session.cert = ssl_client_cert
+
+        # If we have credentials, but not a token, we want to fetch a token
+        if TOKEN not in self.properties and CREDENTIAL in self.properties:
+            self.properties[TOKEN] = self._fetch_access_token(session, self.properties[CREDENTIAL])
+
+        # Set Auth token for subsequent calls in the session
+        if token := self.properties.get(TOKEN):
+            session.headers[AUTHORIZATION_HEADER] = f"{BEARER_PREFIX} {token}"
+
+        # Set HTTP headers
+        session.headers["Content-type"] = "application/json"
+        session.headers["X-Client-Version"] = ICEBERG_REST_SPEC_VERSION
+        session.headers["User-Agent"] = f"PyIceberg/{__version__}"
+
+        # Configure SigV4 Request Signing
+        if str(self.properties.get(SIGV4, False)).lower() == "true":
+            self._init_sigv4(session)
+
+        return session
+
+    def _check_valid_namespace_identifier(self, identifier: Union[str, Identifier]) -> Identifier:
+        """Check if the identifier has at least one element."""
+        identifier_tuple = Catalog.identifier_to_tuple(identifier)
+        if len(identifier_tuple) < 1:
+            raise NoSuchNamespaceError(f"Empty namespace identifier: {identifier}")
+        return identifier_tuple
+
+    def url(self, endpoint: str, prefixed: bool = True, **kwargs: Any) -> str:
+        """Construct the endpoint.
+
+        Args:
+            endpoint: Resource identifier that points to the REST catalog.
+            prefixed: If the prefix returned by the config needs to be appended.
+
+        Returns:
+            The full url of the given endpoint.
+ """ + url = self.uri + url = url + "v1/" if url.endswith("/") else url + "/v1/" + + if prefixed: + url += self.properties.get(PREFIX, "") + url = url if url.endswith("/") else url + "/" + + return url + endpoint.format(**kwargs) + + def _fetch_access_token(self, session: Session, credential: str) -> str: + if SEMICOLON in credential: + client_id, client_secret = credential.split(SEMICOLON) + else: + client_id, client_secret = None, credential + data = {GRANT_TYPE: CLIENT_CREDENTIALS, CLIENT_ID: client_id, CLIENT_SECRET: client_secret, SCOPE: CATALOG_SCOPE} + url = self.url(Endpoints.get_token, prefixed=False) + # Uses application/x-www-form-urlencoded by default + response = session.post(url=url, data=data) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {400: OAuthError, 401: OAuthError}) + + return TokenResponse(**response.json()).access_token + + def _fetch_config(self) -> None: + params = {} + if warehouse_location := self.properties.get(WAREHOUSE_LOCATION): + params[WAREHOUSE_LOCATION] = warehouse_location + + with self._create_session() as session: + response = session.get(self.url(Endpoints.get_config, prefixed=False), params=params) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {}) + config_response = ConfigResponse(**response.json()) + + config = config_response.defaults + config.update(self.properties) + config.update(config_response.overrides) + self.properties = config + + # Update URI based on overrides + self.uri = config[URI] + + def _split_identifier_for_path(self, identifier: Union[str, Identifier]) -> Properties: + identifier_tuple = self.identifier_to_tuple(identifier) + if len(identifier_tuple) <= 1: + raise NoSuchTableError(f"Missing namespace or invalid identifier: {'.'.join(identifier_tuple)}") + return {"namespace": NAMESPACE_SEPARATOR.join(identifier_tuple[:-1]), "table": identifier_tuple[-1]} + + def _split_identifier_for_json(self, identifier: Union[str, Identifier]) -> Dict[str, Union[Identifier, str]]: + identifier_tuple = self.identifier_to_tuple(identifier) + if len(identifier_tuple) <= 1: + raise NoSuchTableError(f"Missing namespace or invalid identifier: {identifier_tuple}") + return {"namespace": identifier_tuple[:-1], "name": identifier_tuple[-1]} + + def _handle_non_200_response(self, exc: HTTPError, error_handler: Dict[int, Type[Exception]]) -> None: + exception: Type[Exception] + code = exc.response.status_code + if code in error_handler: + exception = error_handler[code] + elif code == 400: + exception = BadRequestError + elif code == 401: + exception = UnauthorizedError + elif code == 403: + exception = ForbiddenError + elif code == 422: + exception = RESTError + elif code == 419: + exception = AuthorizationExpiredError + elif code == 501: + exception = NotImplementedError + elif code == 503: + exception = ServiceUnavailableError + elif 500 <= code < 600: + exception = ServerError + else: + exception = RESTError + + try: + if exception == OAuthError: + # The OAuthErrorResponse has a different format + error = OAuthErrorResponse(**exc.response.json()) + response = str(error.error) + if description := error.error_description: + response += f": {description}" + if uri := error.error_uri: + response += f" ({uri})" + else: + error = ErrorResponse(**exc.response.json()).error + response = f"{error.type}: {error.message}" + except JSONDecodeError: + # In the case we don't have a proper response + response = f"RESTError {exc.response.status_code}: 
Could not decode json payload: {exc.response.text}" + except ValidationError as e: + # In the case we don't have a proper response + errs = ", ".join(err["msg"] for err in e.errors()) + response = ( + f"RESTError {exc.response.status_code}: Received unexpected JSON Payload: {exc.response.text}, errors: {errs}" + ) + + raise exception(response) from exc + + def _init_sigv4(self, session: Session) -> None: + from urllib import parse + + import boto3 + from botocore.auth import SigV4Auth + from botocore.awsrequest import AWSRequest + from requests import PreparedRequest + from requests.adapters import HTTPAdapter + + class SigV4Adapter(HTTPAdapter): + def __init__(self, **properties: str): + super().__init__() + self._properties = properties + + def add_headers(self, request: PreparedRequest, **kwargs: Any) -> None: # pylint: disable=W0613 + boto_session = boto3.Session() + credentials = boto_session.get_credentials().get_frozen_credentials() + region = self._properties.get(SIGV4_REGION, boto_session.region_name) + service = self._properties.get(SIGV4_SERVICE, "execute-api") + + url = str(request.url).split("?")[0] + query = str(parse.urlsplit(request.url).query) + params = dict(parse.parse_qsl(query)) + + # remove the connection header as it will be updated after signing + del request.headers["connection"] + + aws_request = AWSRequest( + method=request.method, url=url, params=params, data=request.body, headers=dict(request.headers) + ) + + SigV4Auth(credentials, service, region).add_auth(aws_request) + original_header = request.headers + signed_headers = aws_request.headers + relocated_headers = {} + + # relocate headers if there is a conflict with signed headers + for header, value in original_header.items(): + if header in signed_headers and signed_headers[header] != value: + relocated_headers[f"Original-{header}"] = value + + request.headers.update(relocated_headers) + request.headers.update(signed_headers) + + session.mount(self.uri, SigV4Adapter(**self.properties)) + + def _response_to_table(self, identifier_tuple: Tuple[str, ...], table_response: TableResponse) -> Table: + return Table( + identifier=(self.name,) + identifier_tuple if self.name else identifier_tuple, + metadata_location=table_response.metadata_location, + metadata=table_response.metadata, + io=self._load_file_io( + {**table_response.metadata.properties, **table_response.config}, table_response.metadata_location + ), + catalog=self, + ) + + def create_table( + self, + identifier: Union[str, Identifier], + schema: Schema, + location: Optional[str] = None, + partition_spec: PartitionSpec = UNPARTITIONED_PARTITION_SPEC, + sort_order: SortOrder = UNSORTED_SORT_ORDER, + properties: Properties = EMPTY_DICT, + ) -> Table: + namespace_and_table = self._split_identifier_for_path(identifier) + request = CreateTableRequest( + name=namespace_and_table["table"], + location=location, + table_schema=schema, + partition_spec=partition_spec, + write_order=sort_order, + properties=properties, + ) + serialized_json = request.model_dump_json().encode("utf-8") + response = self._session.post( + self.url(Endpoints.create_table, namespace=namespace_and_table["namespace"]), + data=serialized_json, + ) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {409: TableAlreadyExistsError}) + + table_response = TableResponse(**response.json()) + return self._response_to_table(self.identifier_to_tuple(identifier), table_response) + + def register_table(self, identifier: Union[str, Identifier], 
metadata_location: str) -> Table: + """Register a new table using existing metadata. + + Args: + identifier Union[str, Identifier]: Table identifier for the table + metadata_location str: The location to the metadata + + Returns: + Table: The newly registered table + + Raises: + TableAlreadyExistsError: If the table already exists + """ + namespace_and_table = self._split_identifier_for_path(identifier) + request = RegisterTableRequest( + name=namespace_and_table["table"], + metadata_location=metadata_location, + ) + serialized_json = request.model_dump_json().encode("utf-8") + response = self._session.post( + self.url(Endpoints.register_table, namespace=namespace_and_table["namespace"]), + data=serialized_json, + ) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {409: TableAlreadyExistsError}) + + table_response = TableResponse(**response.json()) + return self._response_to_table(self.identifier_to_tuple(identifier), table_response) + + def list_tables(self, namespace: Union[str, Identifier]) -> List[Identifier]: + namespace_tuple = self._check_valid_namespace_identifier(namespace) + namespace_concat = NAMESPACE_SEPARATOR.join(namespace_tuple) + response = self._session.get(self.url(Endpoints.list_tables, namespace=namespace_concat)) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {404: NoSuchNamespaceError}) + return [(*table.namespace, table.name) for table in ListTablesResponse(**response.json()).identifiers] + + def load_table(self, identifier: Union[str, Identifier]) -> Table: + identifier_tuple = self.identifier_to_tuple(identifier) + + if len(identifier_tuple) <= 1: + raise NoSuchTableError(f"Missing namespace or invalid identifier: {identifier}") + + response = self._session.get(self.url(Endpoints.load_table, prefixed=True, **self._split_identifier_for_path(identifier))) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {404: NoSuchTableError}) + + table_response = TableResponse(**response.json()) + return self._response_to_table(identifier_tuple, table_response) + + def drop_table(self, identifier: Union[str, Identifier], purge_requested: bool = False) -> None: + response = self._session.delete( + self.url(Endpoints.drop_table, prefixed=True, purge=purge_requested, **self._split_identifier_for_path(identifier)), + ) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {404: NoSuchTableError}) + + def purge_table(self, identifier: Union[str, Identifier]) -> None: + self.drop_table(identifier=identifier, purge_requested=True) + + def rename_table(self, from_identifier: Union[str, Identifier], to_identifier: Union[str, Identifier]) -> Table: + payload = { + "source": self._split_identifier_for_json(from_identifier), + "destination": self._split_identifier_for_json(to_identifier), + } + response = self._session.post(self.url(Endpoints.rename_table), json=payload) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {404: NoSuchTableError, 409: TableAlreadyExistsError}) + + return self.load_table(to_identifier) + + def _commit_table(self, table_request: CommitTableRequest) -> CommitTableResponse: + """Update the table. + + Args: + table_request (CommitTableRequest): The table requests to be carried out. + + Returns: + CommitTableResponse: The updated metadata. 
+ + Raises: + NoSuchTableError: If a table with the given identifier does not exist. + """ + response = self._session.post( + self.url(Endpoints.update_table, prefixed=True, **self._split_identifier_for_path(table_request.identifier)), + data=table_request.model_dump_json().encode("utf-8"), + ) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response( + exc, + { + 409: CommitFailedException, + 500: CommitStateUnknownException, + 502: CommitStateUnknownException, + 504: CommitStateUnknownException, + }, + ) + return CommitTableResponse(**response.json()) + + def create_namespace(self, namespace: Union[str, Identifier], properties: Properties = EMPTY_DICT) -> None: + namespace_tuple = self._check_valid_namespace_identifier(namespace) + payload = {"namespace": namespace_tuple, "properties": properties} + response = self._session.post(self.url(Endpoints.create_namespace), json=payload) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {404: NoSuchNamespaceError, 409: NamespaceAlreadyExistsError}) + + def drop_namespace(self, namespace: Union[str, Identifier]) -> None: + namespace_tuple = self._check_valid_namespace_identifier(namespace) + namespace = NAMESPACE_SEPARATOR.join(namespace_tuple) + response = self._session.delete(self.url(Endpoints.drop_namespace, namespace=namespace)) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {404: NoSuchNamespaceError}) + + def list_namespaces(self, namespace: Union[str, Identifier] = ()) -> List[Identifier]: + namespace_tuple = self.identifier_to_tuple(namespace) + response = self._session.get( + self.url( + f"{Endpoints.list_namespaces}?parent={NAMESPACE_SEPARATOR.join(namespace_tuple)}" + if namespace_tuple + else Endpoints.list_namespaces + ), + ) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {}) + + namespaces = ListNamespaceResponse(**response.json()) + return [namespace_tuple + child_namespace for child_namespace in namespaces.namespaces] + + def load_namespace_properties(self, namespace: Union[str, Identifier]) -> Properties: + namespace_tuple = self._check_valid_namespace_identifier(namespace) + namespace = NAMESPACE_SEPARATOR.join(namespace_tuple) + response = self._session.get(self.url(Endpoints.load_namespace_metadata, namespace=namespace)) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {404: NoSuchNamespaceError}) + + return NamespaceResponse(**response.json()).properties + + def update_namespace_properties( + self, namespace: Union[str, Identifier], removals: Optional[Set[str]] = None, updates: Properties = EMPTY_DICT + ) -> PropertiesUpdateSummary: + namespace_tuple = self._check_valid_namespace_identifier(namespace) + namespace = NAMESPACE_SEPARATOR.join(namespace_tuple) + payload = {"removals": list(removals or []), "updates": updates} + response = self._session.post(self.url(Endpoints.update_namespace_properties, namespace=namespace), json=payload) + try: + response.raise_for_status() + except HTTPError as exc: + self._handle_non_200_response(exc, {404: NoSuchNamespaceError}) + parsed_response = UpdateNamespacePropertiesResponse(**response.json()) + return PropertiesUpdateSummary( + removed=parsed_response.removed, + updated=parsed_response.updated, + missing=parsed_response.missing, + ) diff --git a/pyiceberg/catalog/sql.py b/pyiceberg/catalog/sql.py new file mode 100644 index 
0000000000..bca0fe44da --- /dev/null +++ b/pyiceberg/catalog/sql.py @@ -0,0 +1,515 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import ( + List, + Optional, + Set, + Union, +) + +from sqlalchemy import ( + String, + create_engine, + delete, + insert, + select, + union, + update, +) +from sqlalchemy.exc import IntegrityError +from sqlalchemy.orm import ( + DeclarativeBase, + Mapped, + MappedAsDataclass, + Session, + mapped_column, +) + +from pyiceberg.catalog import ( + METADATA_LOCATION, + Catalog, + Identifier, + Properties, + PropertiesUpdateSummary, +) +from pyiceberg.exceptions import ( + NamespaceAlreadyExistsError, + NamespaceNotEmptyError, + NoSuchNamespaceError, + NoSuchPropertyException, + NoSuchTableError, + TableAlreadyExistsError, +) +from pyiceberg.io import load_file_io +from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.serializers import FromInputFile +from pyiceberg.table import CommitTableRequest, CommitTableResponse, Table +from pyiceberg.table.metadata import new_table_metadata +from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder +from pyiceberg.typedef import EMPTY_DICT + + +class SqlCatalogBaseTable(MappedAsDataclass, DeclarativeBase): + pass + + +class IcebergTables(SqlCatalogBaseTable): + __tablename__ = "iceberg_tables" + + catalog_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True) + table_namespace: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True) + table_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True) + metadata_location: Mapped[Optional[str]] = mapped_column(String(1000), nullable=True) + previous_metadata_location: Mapped[Optional[str]] = mapped_column(String(1000), nullable=True) + + +class IcebergNamespaceProperties(SqlCatalogBaseTable): + __tablename__ = "iceberg_namespace_properties" + # Catalog minimum Namespace Properties + NAMESPACE_MINIMAL_PROPERTIES = {"exists": "true"} + + catalog_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True) + namespace: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True) + property_key: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True) + property_value: Mapped[str] = mapped_column(String(1000), nullable=False) + + +class SqlCatalog(Catalog): + def __init__(self, name: str, **properties: str): + super().__init__(name, **properties) + + if not (uri_prop := self.properties.get("uri")): + raise NoSuchPropertyException("SQL connection URI is required") + self.engine = create_engine(uri_prop, echo=True) + + def create_tables(self) -> None: + SqlCatalogBaseTable.metadata.create_all(self.engine) + + def destroy_tables(self) 
-> None: + SqlCatalogBaseTable.metadata.drop_all(self.engine) + + def _convert_orm_to_iceberg(self, orm_table: IcebergTables) -> Table: + # Check for expected properties. + if not (metadata_location := orm_table.metadata_location): + raise NoSuchTableError(f"Table property {METADATA_LOCATION} is missing") + if not (table_namespace := orm_table.table_namespace): + raise NoSuchTableError(f"Table property {IcebergTables.table_namespace} is missing") + if not (table_name := orm_table.table_name): + raise NoSuchTableError(f"Table property {IcebergTables.table_name} is missing") + + io = load_file_io(properties=self.properties, location=metadata_location) + file = io.new_input(metadata_location) + metadata = FromInputFile.table_metadata(file) + return Table( + identifier=(self.name, table_namespace, table_name), + metadata=metadata, + metadata_location=metadata_location, + io=self._load_file_io(metadata.properties, metadata_location), + catalog=self, + ) + + def create_table( + self, + identifier: Union[str, Identifier], + schema: Schema, + location: Optional[str] = None, + partition_spec: PartitionSpec = UNPARTITIONED_PARTITION_SPEC, + sort_order: SortOrder = UNSORTED_SORT_ORDER, + properties: Properties = EMPTY_DICT, + ) -> Table: + """ + Create an Iceberg table. + + Args: + identifier: Table identifier. + schema: Table's schema. + location: Location for the table. Optional Argument. + partition_spec: PartitionSpec for the table. + sort_order: SortOrder for the table. + properties: Table properties that can be a string-based dictionary. + + Returns: + Table: the created table instance. + + Raises: + TableAlreadyExistsError: If a table with the name already exists. + ValueError: If the identifier is invalid, or no path is given to store metadata. + + """ + database_name, table_name = self.identifier_to_database_and_table(identifier) + if not self._namespace_exists(database_name): + raise NoSuchNamespaceError(f"Namespace does not exist: {database_name}") + + location = self._resolve_table_location(location, database_name, table_name) + metadata_location = self._get_metadata_location(location=location) + metadata = new_table_metadata( + location=location, schema=schema, partition_spec=partition_spec, sort_order=sort_order, properties=properties + ) + io = load_file_io(properties=self.properties, location=metadata_location) + self._write_metadata(metadata, io, metadata_location) + + with Session(self.engine) as session: + try: + session.add( + IcebergTables( + catalog_name=self.name, + table_namespace=database_name, + table_name=table_name, + metadata_location=metadata_location, + previous_metadata_location=None, + ) + ) + session.commit() + except IntegrityError as e: + raise TableAlreadyExistsError(f"Table {database_name}.{table_name} already exists") from e + + return self.load_table(identifier=identifier) + + def register_table(self, identifier: Union[str, Identifier], metadata_location: str) -> Table: + """Register a new table using existing metadata.
+ + Args: + identifier (Union[str, Identifier]): Table identifier for the table + metadata_location (str): The location of the metadata + + Returns: + Table: The newly registered table + + Raises: + TableAlreadyExistsError: If the table already exists + NoSuchNamespaceError: If namespace does not exist + """ + database_name, table_name = self.identifier_to_database_and_table(identifier) + if not self._namespace_exists(database_name): + raise NoSuchNamespaceError(f"Namespace does not exist: {database_name}") + + with Session(self.engine) as session: + try: + session.add( + IcebergTables( + catalog_name=self.name, + table_namespace=database_name, + table_name=table_name, + metadata_location=metadata_location, + previous_metadata_location=None, + ) + ) + session.commit() + except IntegrityError as e: + raise TableAlreadyExistsError(f"Table {database_name}.{table_name} already exists") from e + + return self.load_table(identifier=identifier) + + def load_table(self, identifier: Union[str, Identifier]) -> Table: + """Load the table's metadata and return the table instance. + + You can also use this method to check for table existence using 'try catalog.load_table() except NoSuchTableError'. + Note: This method doesn't scan data stored in the table. + + Args: + identifier (str | Identifier): Table identifier. + + Returns: + Table: the table instance with its metadata. + + Raises: + NoSuchTableError: If a table with the name does not exist. + """ + database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError) + with Session(self.engine) as session: + stmt = select(IcebergTables).where( + IcebergTables.catalog_name == self.name, + IcebergTables.table_namespace == database_name, + IcebergTables.table_name == table_name, + ) + result = session.scalar(stmt) + if result: + return self._convert_orm_to_iceberg(result) + raise NoSuchTableError(f"Table does not exist: {database_name}.{table_name}") + + def drop_table(self, identifier: Union[str, Identifier]) -> None: + """Drop a table. + + Args: + identifier (str | Identifier): Table identifier. + + Raises: + NoSuchTableError: If a table with the name does not exist. + """ + database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError) + with Session(self.engine) as session: + res = session.execute( + delete(IcebergTables).where( + IcebergTables.catalog_name == self.name, + IcebergTables.table_namespace == database_name, + IcebergTables.table_name == table_name, + ) + ) + session.commit() + if res.rowcount < 1: + raise NoSuchTableError(f"Table does not exist: {database_name}.{table_name}") + + def rename_table(self, from_identifier: Union[str, Identifier], to_identifier: Union[str, Identifier]) -> Table: + """Rename a fully qualified table name. + + Args: + from_identifier (str | Identifier): Existing table identifier. + to_identifier (str | Identifier): New table identifier. + + Returns: + Table: the updated table instance with its metadata. + + Raises: + NoSuchTableError: If a table with the name does not exist. + TableAlreadyExistsError: If a table with the new name already exists. + NoSuchNamespaceError: If the target namespace does not exist.
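+ + Example (illustrative sketch; the namespace and table names are hypothetical): + catalog.rename_table("default.events", "archive.events")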
+ """ + from_database_name, from_table_name = self.identifier_to_database_and_table(from_identifier, NoSuchTableError) + to_database_name, to_table_name = self.identifier_to_database_and_table(to_identifier) + if not self._namespace_exists(to_database_name): + raise NoSuchNamespaceError(f"Namespace does not exist: {to_database_name}") + with Session(self.engine) as session: + try: + stmt = ( + update(IcebergTables) + .where( + IcebergTables.catalog_name == self.name, + IcebergTables.table_namespace == from_database_name, + IcebergTables.table_name == from_table_name, + ) + .values(table_namespace=to_database_name, table_name=to_table_name) + ) + result = session.execute(stmt) + if result.rowcount < 1: + raise NoSuchTableError(f"Table does not exist: {from_table_name}") + session.commit() + except IntegrityError as e: + raise TableAlreadyExistsError(f"Table {to_database_name}.{to_table_name} already exists") from e + return self.load_table(to_identifier) + + def _commit_table(self, table_request: CommitTableRequest) -> CommitTableResponse: + """Update one or more tables. + + Args: + table_request (CommitTableRequest): The table requests to be carried out. + + Returns: + CommitTableResponse: The updated metadata. + + Raises: + NoSuchTableError: If a table with the given identifier does not exist. + """ + raise NotImplementedError + + def _namespace_exists(self, identifier: Union[str, Identifier]) -> bool: + namespace = self.identifier_to_database(identifier) + with Session(self.engine) as session: + stmt = ( + select(IcebergTables) + .where(IcebergTables.catalog_name == self.name, IcebergTables.table_namespace == namespace) + .limit(1) + ) + result = session.execute(stmt).all() + if result: + return True + stmt = ( + select(IcebergNamespaceProperties) + .where( + IcebergNamespaceProperties.catalog_name == self.name, + IcebergNamespaceProperties.namespace == namespace, + ) + .limit(1) + ) + result = session.execute(stmt).all() + if result: + return True + return False + + def create_namespace(self, namespace: Union[str, Identifier], properties: Properties = EMPTY_DICT) -> None: + """Create a namespace in the catalog. + + Args: + namespace (str | Identifier): Namespace identifier. + properties (Properties): A string dictionary of properties for the given namespace. + + Raises: + NamespaceAlreadyExistsError: If a namespace with the given name already exists. + """ + if not properties: + properties = IcebergNamespaceProperties.NAMESPACE_MINIMAL_PROPERTIES + database_name = self.identifier_to_database(namespace) + if self._namespace_exists(database_name): + raise NamespaceAlreadyExistsError(f"Database {database_name} already exists") + + create_properties = properties if properties else IcebergNamespaceProperties.NAMESPACE_MINIMAL_PROPERTIES + with Session(self.engine) as session: + for key, value in create_properties.items(): + session.add( + IcebergNamespaceProperties( + catalog_name=self.name, namespace=database_name, property_key=key, property_value=value + ) + ) + session.commit() + + def drop_namespace(self, namespace: Union[str, Identifier]) -> None: + """Drop a namespace. + + Args: + namespace (str | Identifier): Namespace identifier. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist. + NamespaceNotEmptyError: If the namespace is not empty. 
+ """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + if self._namespace_exists(database_name): + if tables := self.list_tables(database_name): + raise NamespaceNotEmptyError(f"Database {database_name} is not empty. {len(tables)} tables exist.") + + with Session(self.engine) as session: + session.execute( + delete(IcebergNamespaceProperties).where( + IcebergNamespaceProperties.catalog_name == self.name, + IcebergNamespaceProperties.namespace == database_name, + ) + ) + session.commit() + + def list_tables(self, namespace: Union[str, Identifier]) -> List[Identifier]: + """List tables under the given namespace in the catalog. + + If namespace not provided, will list all tables in the catalog. + + Args: + namespace (str | Identifier): Namespace identifier to search. + + Returns: + List[Identifier]: list of table identifiers. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist. + """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + + stmt = select(IcebergTables).where( + IcebergTables.catalog_name == self.name, IcebergTables.table_namespace == database_name + ) + with Session(self.engine) as session: + result = session.scalars(stmt) + return [(table.table_namespace, table.table_name) for table in result] + + def list_namespaces(self, namespace: Union[str, Identifier] = ()) -> List[Identifier]: + """List namespaces from the given namespace. If not given, list top-level namespaces from the catalog. + + Args: + namespace (str | Identifier): Namespace identifier to search. + + Returns: + List[Identifier]: a List of namespace identifiers. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist. + """ + if namespace and not self._namespace_exists(namespace): + raise NoSuchNamespaceError(f"Namespace does not exist: {namespace}") + + table_stmt = select(IcebergTables.table_namespace).where(IcebergTables.catalog_name == self.name) + namespace_stmt = select(IcebergNamespaceProperties.namespace).where(IcebergNamespaceProperties.catalog_name == self.name) + if namespace: + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + table_stmt = table_stmt.where(IcebergTables.table_namespace.like(database_name)) + namespace_stmt = namespace_stmt.where(IcebergNamespaceProperties.namespace.like(database_name)) + stmt = union( + table_stmt, + namespace_stmt, + ) + with Session(self.engine) as session: + return [self.identifier_to_tuple(namespace_col) for namespace_col in session.execute(stmt).scalars()] + + def load_namespace_properties(self, namespace: Union[str, Identifier]) -> Properties: + """Get properties for a namespace. + + Args: + namespace (str | Identifier): Namespace identifier. + + Returns: + Properties: Properties for the given namespace. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist. 
+ """ + database_name = self.identifier_to_database(namespace, NoSuchNamespaceError) + + stmt = select(IcebergNamespaceProperties).where( + IcebergNamespaceProperties.catalog_name == self.name, IcebergNamespaceProperties.namespace == database_name + ) + with Session(self.engine) as session: + result = session.scalars(stmt) + return {props.property_key: props.property_value for props in result} + + def update_namespace_properties( + self, namespace: Union[str, Identifier], removals: Optional[Set[str]] = None, updates: Properties = EMPTY_DICT + ) -> PropertiesUpdateSummary: + """Remove provided property keys and update properties for a namespace. + + Args: + namespace (str | Identifier): Namespace identifier. + removals (Set[str]): Set of property keys that need to be removed. Optional Argument. + updates (Properties): Properties to be updated for the given namespace. + + Raises: + NoSuchNamespaceError: If a namespace with the given name does not exist. + ValueError: If removals and updates have overlapping keys. + """ + database_name = self.identifier_to_database(namespace) + if not self._namespace_exists(database_name): + raise NoSuchNamespaceError(f"Database {database_name} does not exists") + + current_properties = self.load_namespace_properties(namespace=namespace) + properties_update_summary = self._get_updated_props_and_update_summary( + current_properties=current_properties, removals=removals, updates=updates + )[0] + + with Session(self.engine) as session: + if removals: + delete_stmt = delete(IcebergNamespaceProperties).where( + IcebergNamespaceProperties.catalog_name == self.name, + IcebergNamespaceProperties.namespace == database_name, + IcebergNamespaceProperties.property_key.in_(removals), + ) + session.execute(delete_stmt) + + if updates: + # SQLAlchemy does not (yet) support engine agnostic UPSERT + # https://docs.sqlalchemy.org/en/20/orm/queryguide/dml.html#orm-upsert-statements + # This is not a problem since it runs in a single transaction + delete_stmt = delete(IcebergNamespaceProperties).where( + IcebergNamespaceProperties.catalog_name == self.name, + IcebergNamespaceProperties.namespace == database_name, + IcebergNamespaceProperties.property_key.in_(set(updates.keys())), + ) + session.execute(delete_stmt) + insert_stmt = insert(IcebergNamespaceProperties) + for property_key, property_value in updates.items(): + insert_stmt = insert_stmt.values( + catalog_name=self.name, namespace=database_name, property_key=property_key, property_value=property_value + ) + session.execute(insert_stmt) + session.commit() + return properties_update_summary diff --git a/pyiceberg/cli/__init__.py b/pyiceberg/cli/__init__.py new file mode 100644 index 0000000000..13a83393a9 --- /dev/null +++ b/pyiceberg/cli/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/pyiceberg/cli/console.py b/pyiceberg/cli/console.py new file mode 100644 index 0000000000..62f7a02fab --- /dev/null +++ b/pyiceberg/cli/console.py @@ -0,0 +1,374 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=broad-except,redefined-builtin,redefined-outer-name +from functools import wraps +from typing import ( + Any, + Callable, + Literal, + Optional, + Tuple, +) + +import click +from click import Context + +from pyiceberg import __version__ +from pyiceberg.catalog import Catalog, load_catalog +from pyiceberg.cli.output import ConsoleOutput, JsonOutput, Output +from pyiceberg.exceptions import NoSuchNamespaceError, NoSuchPropertyException, NoSuchTableError + + +def catch_exception() -> Callable: # type: ignore + def decorator(func: Callable) -> Callable: # type: ignore + @wraps(func) + def wrapper(*args: Any, **kwargs: Any): # type: ignore + try: + return func(*args, **kwargs) + except Exception as e: + ctx: Context = click.get_current_context(silent=True) + _, output = _catalog_and_output(ctx) + output.exception(e) + ctx.exit(1) + + return wrapper + + return decorator + + +@click.group() +@click.option("--catalog") +@click.option("--verbose", type=click.BOOL) +@click.option("--output", type=click.Choice(["text", "json"]), default="text") +@click.option("--uri") +@click.option("--credential") +@click.pass_context +def run(ctx: Context, catalog: Optional[str], verbose: bool, output: str, uri: Optional[str], credential: Optional[str]) -> None: + properties = {} + if uri: + properties["uri"] = uri + if credential: + properties["credential"] = credential + + ctx.ensure_object(dict) + if output == "text": + ctx.obj["output"] = ConsoleOutput(verbose=verbose) + else: + ctx.obj["output"] = JsonOutput(verbose=verbose) + + try: + ctx.obj["catalog"] = load_catalog(catalog, **properties) + except Exception as e: + ctx.obj["output"].exception(e) + ctx.exit(1) + + if not isinstance(ctx.obj["catalog"], Catalog): + ctx.obj["output"].exception( + ValueError("Could not determine catalog type from uri. 
REST (http/https) and Hive (thrift) is supported") + ) + ctx.exit(1) + + +def _catalog_and_output(ctx: Context) -> Tuple[Catalog, Output]: + """Small helper to set the types.""" + return ctx.obj["catalog"], ctx.obj["output"] + + +@run.command() +@click.pass_context +@click.argument("parent", required=False) +@catch_exception() +def list(ctx: Context, parent: Optional[str]) -> None: # pylint: disable=redefined-builtin + """List tables or namespaces.""" + catalog, output = _catalog_and_output(ctx) + + identifiers = catalog.list_namespaces(parent or ()) + if not identifiers and parent: + identifiers = catalog.list_tables(parent) + output.identifiers(identifiers) + + +@run.command() +@click.option("--entity", type=click.Choice(["any", "namespace", "table"]), default="any") +@click.argument("identifier") +@click.pass_context +@catch_exception() +def describe(ctx: Context, entity: Literal["name", "namespace", "table"], identifier: str) -> None: + """Describe a namespace or a table.""" + catalog, output = _catalog_and_output(ctx) + identifier_tuple = Catalog.identifier_to_tuple(identifier) + + is_namespace = False + if entity in {"namespace", "any"} and len(identifier_tuple) > 0: + try: + namespace_properties = catalog.load_namespace_properties(identifier_tuple) + output.describe_properties(namespace_properties) + is_namespace = True + except NoSuchNamespaceError as exc: + if entity != "any" or len(identifier_tuple) == 1: # type: ignore + raise exc + + is_table = False + if entity in {"table", "any"} and len(identifier_tuple) > 1: + try: + catalog_table = catalog.load_table(identifier) + output.describe_table(catalog_table) + is_table = True + except NoSuchTableError as exc: + if entity != "any": + raise exc + + if is_namespace is False and is_table is False: + raise NoSuchTableError(f"Table or namespace does not exist: {identifier}") + + +@run.command() +@click.argument("identifier") +@click.option("--history", is_flag=True) +@click.pass_context +@catch_exception() +def files(ctx: Context, identifier: str, history: bool) -> None: + """List all the files of the table.""" + catalog, output = _catalog_and_output(ctx) + + catalog_table = catalog.load_table(identifier) + output.files(catalog_table, history) + + +@run.command() +@click.argument("identifier") +@click.pass_context +@catch_exception() +def schema(ctx: Context, identifier: str) -> None: + """Get the schema of the table.""" + catalog, output = _catalog_and_output(ctx) + table = catalog.load_table(identifier) + output.schema(table.schema()) + + +@run.command() +@click.argument("identifier") +@click.pass_context +@catch_exception() +def spec(ctx: Context, identifier: str) -> None: + """Return the partition spec of the table.""" + catalog, output = _catalog_and_output(ctx) + table = catalog.load_table(identifier) + output.spec(table.spec()) + + +@run.command() +@click.argument("identifier") +@click.pass_context +@catch_exception() +def uuid(ctx: Context, identifier: str) -> None: + """Return the UUID of the table.""" + catalog, output = _catalog_and_output(ctx) + metadata = catalog.load_table(identifier).metadata + output.uuid(metadata.table_uuid) + + +@run.command() +@click.argument("identifier") +@click.pass_context +@catch_exception() +def location(ctx: Context, identifier: str) -> None: + """Return the location of the table.""" + catalog, output = _catalog_and_output(ctx) + table = catalog.load_table(identifier) + output.text(table.location()) + + +@run.command() +@click.pass_context +@catch_exception() +def version(ctx: Context) -> None: 
+ """Print pyiceberg version.""" + ctx.obj["output"].version(__version__) + + +@run.group() +def drop() -> None: + """Operations to drop a namespace or table.""" + + +@drop.command() +@click.argument("identifier") +@click.pass_context +@catch_exception() +def table(ctx: Context, identifier: str) -> None: # noqa: F811 + """Drop a table.""" + catalog, output = _catalog_and_output(ctx) + + catalog.drop_table(identifier) + output.text(f"Dropped table: {identifier}") + + +@drop.command() +@click.argument("identifier") +@click.pass_context +@catch_exception() +def namespace(ctx: Context, identifier: str) -> None: + """Drop a namespace.""" + catalog, output = _catalog_and_output(ctx) + + catalog.drop_namespace(identifier) + output.text(f"Dropped namespace: {identifier}") + + +@run.command() +@click.argument("from_identifier") +@click.argument("to_identifier") +@click.pass_context +@catch_exception() +def rename(ctx: Context, from_identifier: str, to_identifier: str) -> None: + """Rename a table.""" + catalog, output = _catalog_and_output(ctx) + + catalog.rename_table(from_identifier, to_identifier) + output.text(f"Renamed table from {from_identifier} to {to_identifier}") + + +@run.group() +def properties() -> None: + """Properties on tables/namespaces.""" + + +@properties.group() +def get() -> None: + """Fetch properties on tables/namespaces.""" + + +@get.command("namespace") +@click.argument("identifier") +@click.argument("property_name", required=False) +@click.pass_context +@catch_exception() +def get_namespace(ctx: Context, identifier: str, property_name: str) -> None: + """Fetch properties on a namespace.""" + catalog, output = _catalog_and_output(ctx) + identifier_tuple = Catalog.identifier_to_tuple(identifier) + + namespace_properties = catalog.load_namespace_properties(identifier_tuple) + assert namespace_properties + + if property_name: + if property_value := namespace_properties.get(property_name): + output.text(property_value) + else: + raise NoSuchPropertyException(f"Could not find property {property_name} on namespace {identifier}") + else: + output.describe_properties(namespace_properties) + + +@get.command("table") +@click.argument("identifier") +@click.argument("property_name", required=False) +@click.pass_context +@catch_exception() +def get_table(ctx: Context, identifier: str, property_name: str) -> None: + """Fetch properties on a table.""" + catalog, output = _catalog_and_output(ctx) + identifier_tuple = Catalog.identifier_to_tuple(identifier) + + metadata = catalog.load_table(identifier_tuple).metadata + assert metadata + + if property_name: + if property_value := metadata.properties.get(property_name): + output.text(property_value) + else: + raise NoSuchPropertyException(f"Could not find property {property_name} on table {identifier}") + else: + output.describe_properties(metadata.properties) + + +@properties.group() +def set() -> None: + """Set a property on tables/namespaces.""" + + +@set.command() # type: ignore +@click.argument("identifier") +@click.argument("property_name") +@click.argument("property_value") +@click.pass_context +@catch_exception() +def namespace(ctx: Context, identifier: str, property_name: str, property_value: str) -> None: # noqa: F811 + """Set a property on a namespace.""" + catalog, output = _catalog_and_output(ctx) + + catalog.update_namespace_properties(identifier, updates={property_name: property_value}) + output.text(f"Updated {property_name} on {identifier}") + + +@set.command() # type: ignore +@click.argument("identifier") 
+@click.argument("property_name") +@click.argument("property_value") +@click.pass_context +@catch_exception() +def table(ctx: Context, identifier: str, property_name: str, property_value: str) -> None: # noqa: F811 + """Set a property on a table.""" + catalog, output = _catalog_and_output(ctx) + identifier_tuple = Catalog.identifier_to_tuple(identifier) + + _ = catalog.load_table(identifier_tuple) + output.text(f"Setting {property_name}={property_value} on {identifier}") + raise NotImplementedError("Writing is WIP") + + +@properties.group() +def remove() -> None: + """Remove a property from tables/namespaces.""" + + +@remove.command() # type: ignore +@click.argument("identifier") +@click.argument("property_name") +@click.pass_context +@catch_exception() +def namespace(ctx: Context, identifier: str, property_name: str) -> None: # noqa: F811 + """Remove a property from a namespace.""" + catalog, output = _catalog_and_output(ctx) + + result = catalog.update_namespace_properties(identifier, removals={property_name}) + + if result.removed == [property_name]: + output.text(f"Property {property_name} removed from {identifier}") + else: + raise NoSuchPropertyException(f"Property {property_name} does not exist on {identifier}") + + +@remove.command() # type: ignore +@click.argument("identifier") +@click.argument("property_name") +@click.pass_context +@catch_exception() +def table(ctx: Context, identifier: str, property_name: str) -> None: # noqa: F811 + """Remove a property from a table.""" + catalog, output = _catalog_and_output(ctx) + table = catalog.load_table(identifier) + if property_name in table.metadata.properties: + # We should think of the process here + # Do we want something similar as in Java: + # https://github.com/apache/iceberg/blob/master/api/src/main/java/org/apache/iceberg/Table.java#L178 + del table.metadata.properties + output.exception(NotImplementedError("Writing is WIP")) + ctx.exit(1) + else: + raise NoSuchPropertyException(f"Property {property_name} does not exist on {identifier}") diff --git a/pyiceberg/cli/output.py b/pyiceberg/cli/output.py new file mode 100644 index 0000000000..299f84dafe --- /dev/null +++ b/pyiceberg/cli/output.py @@ -0,0 +1,228 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+import json +from abc import ABC, abstractmethod +from typing import Any, List, Optional +from uuid import UUID + +from rich.console import Console +from rich.table import Table as RichTable +from rich.tree import Tree + +from pyiceberg.partitioning import PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.table import Table, TableMetadata +from pyiceberg.typedef import IcebergBaseModel, Identifier, Properties + + +class Output(ABC): + """Output interface for exporting.""" + + @abstractmethod + def exception(self, ex: Exception) -> None: + ... + + @abstractmethod + def identifiers(self, identifiers: List[Identifier]) -> None: + ... + + @abstractmethod + def describe_table(self, table: Table) -> None: + ... + + @abstractmethod + def files(self, table: Table, history: bool) -> None: + ... + + @abstractmethod + def describe_properties(self, properties: Properties) -> None: + ... + + @abstractmethod + def text(self, response: str) -> None: + ... + + @abstractmethod + def schema(self, schema: Schema) -> None: + ... + + @abstractmethod + def spec(self, spec: PartitionSpec) -> None: + ... + + @abstractmethod + def uuid(self, uuid: Optional[UUID]) -> None: + ... + + @abstractmethod + def version(self, version: str) -> None: + ... + + +class ConsoleOutput(Output): + """Writes to the console.""" + + verbose: bool + + def __init__(self, **properties: Any) -> None: + self.verbose = properties.get("verbose", False) + + @property + def _table(self) -> RichTable: + return RichTable.grid(padding=(0, 2)) + + def exception(self, ex: Exception) -> None: + if self.verbose: + Console(stderr=True).print_exception() + else: + Console(stderr=True).print(ex) + + def identifiers(self, identifiers: List[Identifier]) -> None: + table = self._table + for identifier in identifiers: + table.add_row(".".join(identifier)) + + Console().print(table) + + def describe_table(self, table: Table) -> None: + metadata = table.metadata + table_properties = self._table + + for key, value in metadata.properties.items(): + table_properties.add_row(key, value) + + schema_tree = Tree(f"Schema, id={table.metadata.current_schema_id}") + for field in table.schema().fields: + schema_tree.add(str(field)) + + snapshot_tree = Tree("Snapshots") + for snapshot in metadata.snapshots: + manifest_list_str = f": {snapshot.manifest_list}" if snapshot.manifest_list else "" + snapshot_tree.add(f"Snapshot {snapshot.snapshot_id}, schema {snapshot.schema_id}{manifest_list_str}") + + output_table = self._table + output_table.add_row("Table format version", str(metadata.format_version)) + output_table.add_row("Metadata location", table.metadata_location) + output_table.add_row("Table UUID", str(table.metadata.table_uuid)) + output_table.add_row("Last Updated", str(metadata.last_updated_ms)) + output_table.add_row("Partition spec", str(table.spec())) + output_table.add_row("Sort order", str(table.sort_order())) + output_table.add_row("Current schema", schema_tree) + output_table.add_row("Current snapshot", str(table.current_snapshot())) + output_table.add_row("Snapshots", snapshot_tree) + output_table.add_row("Properties", table_properties) + Console().print(output_table) + + def files(self, table: Table, history: bool) -> None: + if history: + snapshots = table.metadata.snapshots + else: + if snapshot := table.current_snapshot(): + snapshots = [snapshot] + else: + snapshots = [] + + snapshot_tree = Tree(f"Snapshots: {'.'.join(table.identifier)}") + io = table.io + + for snapshot in snapshots: + manifest_list_str = f": 
{snapshot.manifest_list}" if snapshot.manifest_list else "" + list_tree = snapshot_tree.add(f"Snapshot {snapshot.snapshot_id}, schema {snapshot.schema_id}{manifest_list_str}") + + manifest_list = snapshot.manifests(io) + for manifest in manifest_list: + manifest_tree = list_tree.add(f"Manifest: {manifest.manifest_path}") + for manifest_entry in manifest.fetch_manifest_entry(io, discard_deleted=False): + manifest_tree.add(f"Datafile: {manifest_entry.data_file.file_path}") + Console().print(snapshot_tree) + + def describe_properties(self, properties: Properties) -> None: + output_table = self._table + for k, v in properties.items(): + output_table.add_row(k, v) + Console().print(output_table) + + def text(self, response: str) -> None: + Console().print(response) + + def schema(self, schema: Schema) -> None: + output_table = self._table + for field in schema.fields: + output_table.add_row(field.name, str(field.field_type), field.doc or "") + Console().print(output_table) + + def spec(self, spec: PartitionSpec) -> None: + Console().print(str(spec)) + + def uuid(self, uuid: Optional[UUID]) -> None: + Console().print(str(uuid) if uuid else "missing") + + def version(self, version: str) -> None: + Console().print(version) + + +class JsonOutput(Output): + """Writes json to stdout.""" + + verbose: bool + + def __init__(self, **properties: Any) -> None: + self.verbose = properties.get("verbose", False) + + def _out(self, d: Any) -> None: + print(json.dumps(d)) + + def exception(self, ex: Exception) -> None: + self._out({"type": ex.__class__.__name__, "message": str(ex)}) + + def identifiers(self, identifiers: List[Identifier]) -> None: + self._out([".".join(identifier) for identifier in identifiers]) + + def describe_table(self, table: Table) -> None: + class FauxTable(IcebergBaseModel): + """Just to encode it using Pydantic.""" + + identifier: Identifier + metadata_location: str + metadata: TableMetadata + + print( + FauxTable( + identifier=table.identifier, metadata=table.metadata, metadata_location=table.metadata_location + ).model_dump_json() + ) + + def describe_properties(self, properties: Properties) -> None: + self._out(properties) + + def text(self, response: str) -> None: + print(json.dumps(response)) + + def schema(self, schema: Schema) -> None: + print(schema.model_dump_json()) + + def files(self, table: Table, history: bool) -> None: + pass + + def spec(self, spec: PartitionSpec) -> None: + print(spec.model_dump_json()) + + def uuid(self, uuid: Optional[UUID]) -> None: + self._out({"uuid": str(uuid) if uuid else "missing"}) + + def version(self, version: str) -> None: + self._out({"version": version}) diff --git a/pyiceberg/conversions.py b/pyiceberg/conversions.py new file mode 100644 index 0000000000..aecd43a949 --- /dev/null +++ b/pyiceberg/conversions.py @@ -0,0 +1,324 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +"""Utility module for various conversions around PrimitiveType implementations. + +This module enables: + - Converting partition strings to built-in python objects. + - Converting a value to a byte buffer. + - Converting a byte buffer to a value. + +Note: + Conversion logic varies based on the PrimitiveType implementation. Therefore conversion functions + are defined here as generic functions using the @singledispatch decorator. For each PrimitiveType + implementation, a concrete function is registered for each generic conversion function. For PrimitiveType + implementations that share the same conversion logic, registrations can be stacked. +""" +import uuid +from datetime import date, datetime, time +from decimal import Decimal +from functools import singledispatch +from struct import Struct +from typing import ( + Any, + Callable, + Optional, + Union, +) + +from pyiceberg.typedef import L +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IntegerType, + LongType, + PrimitiveType, + StringType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) +from pyiceberg.utils.datetime import date_to_days, datetime_to_micros, time_to_micros +from pyiceberg.utils.decimal import decimal_to_bytes, unscaled_to_decimal + +_BOOL_STRUCT = Struct("<?") +_INT_STRUCT = Struct("<i") +_LONG_STRUCT = Struct("<q") +_FLOAT_STRUCT = Struct("<f") +_DOUBLE_STRUCT = Struct("<d") + + +def handle_none(func: Callable) -> Callable: # type: ignore + """Handle cases where partition values are `None` or "__HIVE_DEFAULT_PARTITION__". + + Args: + func (Callable): A function registered to the singledispatch function `partition_to_py`. + """ + + def wrapper(primitive_type: PrimitiveType, value_str: Optional[str]) -> Any: + if value_str is None: + return None + elif value_str == "__HIVE_DEFAULT_PARTITION__": + return None + return func(primitive_type, value_str) + + return wrapper + + +@singledispatch +def partition_to_py(primitive_type: PrimitiveType, value_str: str) -> Union[int, float, str, uuid.UUID, bytes, Decimal]: + """Convert a partition string to a python built-in. + + Args: + primitive_type (PrimitiveType): An implementation of the PrimitiveType base class. + value_str (str): A string representation of a partition value. + """ + raise TypeError(f"Cannot convert '{value_str}' to unsupported type: {primitive_type}") + + +@partition_to_py.register(BooleanType) +@handle_none +def _(primitive_type: BooleanType, value_str: str) -> bool: + return value_str.lower() == "true" + + +@partition_to_py.register(IntegerType) +@partition_to_py.register(LongType) +@partition_to_py.register(DateType) +@partition_to_py.register(TimeType) +@partition_to_py.register(TimestampType) +@partition_to_py.register(TimestamptzType) +@handle_none +def _(primitive_type: PrimitiveType, value_str: str) -> int: + """Convert a string to an integer value. + + Raises: + ValueError: If the scale/exponent is not 0.
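+ + Example: + >>> partition_to_py(LongType(), "123") + 123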
+ """ + _, _, exponent = Decimal(value_str).as_tuple() + if exponent != 0: # Raise if there are digits to the right of the decimal + raise ValueError(f"Cannot convert partition value, value cannot have fractional digits for {primitive_type} partition") + return int(float(value_str)) + + +@partition_to_py.register(FloatType) +@partition_to_py.register(DoubleType) +@handle_none +def _(_: PrimitiveType, value_str: str) -> float: + return float(value_str) + + +@partition_to_py.register(StringType) +@handle_none +def _(_: StringType, value_str: str) -> str: + return value_str + + +@partition_to_py.register(UUIDType) +@handle_none +def _(_: UUIDType, value_str: str) -> uuid.UUID: + return uuid.UUID(value_str) + + +@partition_to_py.register(FixedType) +@partition_to_py.register(BinaryType) +@handle_none +def _(_: PrimitiveType, value_str: str) -> bytes: + return bytes(value_str, "UTF-8") + + +@partition_to_py.register(DecimalType) +@handle_none +def _(_: DecimalType, value_str: str) -> Decimal: + return Decimal(value_str) + + +@singledispatch +def to_bytes( + primitive_type: PrimitiveType, _: Union[bool, bytes, Decimal, date, datetime, float, int, str, time, uuid.UUID] +) -> bytes: + """Convert a built-in python value to bytes. + + This conversion follows the serialization scheme for storing single values as individual binary values defined in the Iceberg specification that + can be found at https://iceberg.apache.org/spec/#appendix-d-single-value-serialization + + Args: + primitive_type (PrimitiveType): An implementation of the PrimitiveType base class. + _: The value to convert to bytes (The type of this value depends on which dispatched function is + used--check dispatchable functions for type hints). + """ + raise TypeError(f"scale does not match {primitive_type}") + + +@to_bytes.register(BooleanType) +def _(_: BooleanType, value: bool) -> bytes: + return _BOOL_STRUCT.pack(1 if value else 0) + + +@to_bytes.register(IntegerType) +def _(_: PrimitiveType, value: int) -> bytes: + return _INT_STRUCT.pack(value) + + +@to_bytes.register(LongType) +def _(_: PrimitiveType, value: int) -> bytes: + return _LONG_STRUCT.pack(value) + + +@to_bytes.register(TimestampType) +@to_bytes.register(TimestamptzType) +def _(_: TimestampType, value: Union[datetime, int]) -> bytes: + if isinstance(value, datetime): + value = datetime_to_micros(value) + return _LONG_STRUCT.pack(value) + + +@to_bytes.register(DateType) +def _(_: DateType, value: Union[date, int]) -> bytes: + if isinstance(value, date): + value = date_to_days(value) + return _INT_STRUCT.pack(value) + + +@to_bytes.register(TimeType) +def _(_: TimeType, value: Union[time, int]) -> bytes: + if isinstance(value, time): + value = time_to_micros(value) + return _LONG_STRUCT.pack(value) + + +@to_bytes.register(FloatType) +def _(_: FloatType, value: float) -> bytes: + """Convert a float value into bytes. + + Note: float in python is implemented using a double in C. Therefore this involves a conversion of a 32-bit (single precision) + float to a 64-bit (double precision) float which introduces some imprecision. 
+ """ + return _FLOAT_STRUCT.pack(value) + + +@to_bytes.register(DoubleType) +def _(_: DoubleType, value: float) -> bytes: + return _DOUBLE_STRUCT.pack(value) + + +@to_bytes.register(StringType) +def _(_: StringType, value: str) -> bytes: + return value.encode("UTF-8") + + +@to_bytes.register(UUIDType) +def _(_: UUIDType, value: Union[uuid.UUID, bytes]) -> bytes: + if isinstance(value, bytes): + return value + return value.bytes + + +@to_bytes.register(BinaryType) +@to_bytes.register(FixedType) +def _(_: PrimitiveType, value: bytes) -> bytes: + return value + + +@to_bytes.register(DecimalType) +def _(primitive_type: DecimalType, value: Decimal) -> bytes: + """Convert a Decimal value to bytes given a DecimalType instance with defined precision and scale. + + Args: + primitive_type (DecimalType): A DecimalType instance with precision and scale. + value (Decimal): A Decimal instance. + + Raises: + ValueError: If either the precision or scale of `value` does not match that defined in the DecimalType instance. + + + Returns: + bytes: The byte representation of `value`. + """ + _, digits, exponent = value.as_tuple() + exponent = abs(int(exponent)) + if exponent != primitive_type.scale: + raise ValueError(f"Cannot serialize value, scale of value does not match type {primitive_type}: {exponent}") + elif len(digits) > primitive_type.precision: + raise ValueError( + f"Cannot serialize value, precision of value is greater than precision of type {primitive_type}: {len(digits)}" + ) + + return decimal_to_bytes(value) + + +@singledispatch +def from_bytes(primitive_type: PrimitiveType, b: bytes) -> L: + """Convert bytes to a built-in python value. + + Args: + primitive_type (PrimitiveType): An implementation of the PrimitiveType base class. + b (bytes): The bytes to convert. + """ + raise TypeError(f"Cannot deserialize bytes, type {primitive_type} not supported: {str(b)}") + + +@from_bytes.register(BooleanType) +def _(_: BooleanType, b: bytes) -> bool: + return _BOOL_STRUCT.unpack(b)[0] != 0 + + +@from_bytes.register(IntegerType) +@from_bytes.register(DateType) +def _(_: PrimitiveType, b: bytes) -> int: + return _INT_STRUCT.unpack(b)[0] + + +@from_bytes.register(LongType) +@from_bytes.register(TimeType) +@from_bytes.register(TimestampType) +@from_bytes.register(TimestamptzType) +def _(_: PrimitiveType, b: bytes) -> int: + return _LONG_STRUCT.unpack(b)[0] + + +@from_bytes.register(FloatType) +def _(_: FloatType, b: bytes) -> float: + return _FLOAT_STRUCT.unpack(b)[0] + + +@from_bytes.register(DoubleType) +def _(_: DoubleType, b: bytes) -> float: + return _DOUBLE_STRUCT.unpack(b)[0] + + +@from_bytes.register(StringType) +def _(_: StringType, b: bytes) -> str: + return bytes(b).decode("utf-8") + + +@from_bytes.register(BinaryType) +@from_bytes.register(FixedType) +@from_bytes.register(UUIDType) +def _(_: PrimitiveType, b: bytes) -> bytes: + return b + + +@from_bytes.register(DecimalType) +def _(primitive_type: DecimalType, buf: bytes) -> Decimal: + unscaled = int.from_bytes(buf, "big", signed=True) + return unscaled_to_decimal(unscaled, primitive_type.scale) diff --git a/pyiceberg/exceptions.py b/pyiceberg/exceptions.py new file mode 100644 index 0000000000..f555543723 --- /dev/null +++ b/pyiceberg/exceptions.py @@ -0,0 +1,112 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +class TableAlreadyExistsError(Exception): + """Raised when creating a table with a name that already exists.""" + + +class NamespaceNotEmptyError(Exception): + """Raised when a namespace being dropped is not empty.""" + + +class NamespaceAlreadyExistsError(Exception): + """Raised when a namespace being created already exists in the catalog.""" + + +class ValidationError(Exception): + """Raised when there is an issue with the schema.""" + + +class NoSuchTableError(Exception): + """Raised when the table can't be found in the catalog.""" + + +class NoSuchIcebergTableError(NoSuchTableError): + """Raised when the table found in the catalog is not an Iceberg table.""" + + +class NoSuchNamespaceError(Exception): + """Raised when a referenced namespace is not found.""" + + +class RESTError(Exception): + """Raised when there is an unknown response from the REST catalog.""" + + +class BadRequestError(RESTError): + """Raised when an invalid request is made.""" + + +class UnauthorizedError(RESTError): + """Raised when you don't have the proper authorization.""" + + +class ServiceUnavailableError(RESTError): + """Raised when the service doesn't respond.""" + + +class ServerError(RESTError): + """Raised when there is an unhandled exception on the server side.""" + + +class ForbiddenError(RESTError): + """Raised when you don't have the credentials to perform the action on the REST catalog.""" + + +class AuthorizationExpiredError(RESTError): + """Raised when the credentials have expired while performing an action on the REST catalog.""" + + +class OAuthError(RESTError): + """Raised when there is an error with the OAuth call.""" + + +class NoSuchPropertyException(Exception): + """Raised when a property is missing.""" + + +class NotInstalledError(Exception): + """Raised when an optional dependency is not installed.""" + + +class SignError(Exception): + """Raised when unable to sign an S3 request.""" + + +class ResolveError(Exception): + pass + + +class DynamoDbError(Exception): + pass + + +class ConditionalCheckFailedException(DynamoDbError): + pass + + +class GenericDynamoDbError(DynamoDbError): + pass + + +class CommitFailedException(RESTError): + """Commit failed, refresh and try again.""" + + +class CommitStateUnknownException(RESTError): + """Commit failed for an unknown reason.""" diff --git a/pyiceberg/expressions/__init__.py b/pyiceberg/expressions/__init__.py new file mode 100644 index 0000000000..f3dc6dcb36 --- /dev/null +++ b/pyiceberg/expressions/__init__.py @@ -0,0 +1,899 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from __future__ import annotations + +from abc import ABC, abstractmethod +from functools import cached_property, reduce +from typing import ( + Any, + Generic, + Iterable, + Set, + Tuple, + Type, + TypeVar, + Union, +) + +from pyiceberg.expressions.literals import ( + AboveMax, + BelowMin, + Literal, + literal, +) +from pyiceberg.schema import Accessor, Schema +from pyiceberg.typedef import L, StructProtocol +from pyiceberg.types import DoubleType, FloatType, NestedField +from pyiceberg.utils.singleton import Singleton + + +def _to_unbound_term(term: Union[str, UnboundTerm[Any]]) -> UnboundTerm[Any]: + return Reference(term) if isinstance(term, str) else term + + +def _to_literal_set(values: Union[Iterable[L], Iterable[Literal[L]]]) -> Set[Literal[L]]: + return {_to_literal(v) for v in values} + + +def _to_literal(value: Union[L, Literal[L]]) -> Literal[L]: + if isinstance(value, Literal): + return value + else: + return literal(value) + + +class BooleanExpression(ABC): + """An expression that evaluates to a boolean.""" + + @abstractmethod + def __invert__(self) -> BooleanExpression: + """Transform the Expression into its negated version.""" + + +class Term(Generic[L], ABC): + """A simple expression that evaluates to a value.""" + + +class Bound(ABC): + """Represents a bound value expression.""" + + +B = TypeVar("B") + + +class Unbound(Generic[B], ABC): + """Represents an unbound value expression.""" + + @abstractmethod + def bind(self, schema: Schema, case_sensitive: bool = True) -> B: + ... + + @property + @abstractmethod + def as_bound(self) -> Type[Bound]: + ... + + +class BoundTerm(Term[L], Bound, ABC): + """Represents a bound term.""" + + @abstractmethod + def ref(self) -> BoundReference[L]: + """Return the bound reference.""" + + @abstractmethod + def eval(self, struct: StructProtocol) -> L: # pylint: disable=W0613 + """Return the value at the referenced field's position in an object that abides by the StructProtocol.""" + + +class BoundReference(BoundTerm[L]): + """A reference bound to a field in a schema. + + Args: + field (NestedField): A referenced field in an Iceberg schema. + accessor (Accessor): An Accessor object to access the value at the field's position. + """ + + field: NestedField + accessor: Accessor + + def __init__(self, field: NestedField, accessor: Accessor): + self.field = field + self.accessor = accessor + + def eval(self, struct: StructProtocol) -> L: + """Return the value at the referenced field's position in an object that abides by the StructProtocol. + + Args: + struct (StructProtocol): A row object that abides by the StructProtocol and returns values given a position. + Returns: + Any: The value at the referenced field's position in `struct`. 
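+ + Example (illustrative; `bound_ref` and `row` are hypothetical stand-ins for a bound reference and any StructProtocol row): + >>> bound_ref.eval(row) # doctest: +SKIP + 42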
+ """ + return self.accessor.get(struct) + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the BoundReference class.""" + return self.field == other.field if isinstance(other, BoundReference) else False + + def __repr__(self) -> str: + """Return the string representation of the BoundReference class.""" + return f"BoundReference(field={repr(self.field)}, accessor={repr(self.accessor)})" + + def ref(self) -> BoundReference[L]: + return self + + +class UnboundTerm(Term[Any], Unbound[BoundTerm[L]], ABC): + """Represents an unbound term.""" + + @abstractmethod + def bind(self, schema: Schema, case_sensitive: bool = True) -> BoundTerm[L]: + ... + + +class Reference(UnboundTerm[Any]): + """A reference not yet bound to a field in a schema. + + Args: + name (str): The name of the field. + + Note: + An unbound reference is sometimes referred to as a "named" reference. + """ + + name: str + + def __init__(self, name: str) -> None: + self.name = name + + def __repr__(self) -> str: + """Return the string representation of the Reference class.""" + return f"Reference(name={repr(self.name)})" + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the Reference class.""" + return self.name == other.name if isinstance(other, Reference) else False + + def bind(self, schema: Schema, case_sensitive: bool = True) -> BoundReference[L]: + """Bind the reference to an Iceberg schema. + + Args: + schema (Schema): An Iceberg schema. + case_sensitive (bool): Whether to consider case when binding the reference to the field. + + Raises: + ValueError: If an empty name is provided. + + Returns: + BoundReference: A reference bound to the specific field in the Iceberg schema. + """ + field = schema.find_field(name_or_id=self.name, case_sensitive=case_sensitive) + accessor = schema.accessor_for_field(field.field_id) + return self.as_bound(field=field, accessor=accessor) # type: ignore + + @property + def as_bound(self) -> Type[BoundReference[L]]: + return BoundReference[L] + + +class And(BooleanExpression): + """AND operation expression - logical conjunction.""" + + left: BooleanExpression + right: BooleanExpression + + def __new__(cls, left: BooleanExpression, right: BooleanExpression, *rest: BooleanExpression) -> BooleanExpression: # type: ignore + if rest: + return reduce(And, (left, right, *rest)) + if left is AlwaysFalse() or right is AlwaysFalse(): + return AlwaysFalse() + elif left is AlwaysTrue(): + return right + elif right is AlwaysTrue(): + return left + else: + obj = super().__new__(cls) + obj.left = left + obj.right = right + return obj + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the And class.""" + return self.left == other.left and self.right == other.right if isinstance(other, And) else False + + def __str__(self) -> str: + """Return the string representation of the And class.""" + return f"And(left={str(self.left)}, right={str(self.right)})" + + def __repr__(self) -> str: + """Return the string representation of the And class.""" + return f"And(left={repr(self.left)}, right={repr(self.right)})" + + def __invert__(self) -> BooleanExpression: + """Transform the Expression into its negated version.""" + # De Morgan's law: not (A and B) = (not A) or (not B) + return Or(~self.left, ~self.right) + + def __getnewargs__(self) -> Tuple[BooleanExpression, BooleanExpression]: + """Pickle the And class.""" + return (self.left, self.right) + + +class Or(BooleanExpression): + """OR operation expression - 
logical disjunction.""" + + left: BooleanExpression + right: BooleanExpression + + def __new__(cls, left: BooleanExpression, right: BooleanExpression, *rest: BooleanExpression) -> BooleanExpression: # type: ignore + if rest: + return reduce(Or, (left, right, *rest)) + if left is AlwaysTrue() or right is AlwaysTrue(): + return AlwaysTrue() + elif left is AlwaysFalse(): + return right + elif right is AlwaysFalse(): + return left + else: + obj = super().__new__(cls) + obj.left = left + obj.right = right + return obj + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the Or class.""" + return self.left == other.left and self.right == other.right if isinstance(other, Or) else False + + def __repr__(self) -> str: + """Return the string representation of the Or class.""" + return f"Or(left={repr(self.left)}, right={repr(self.right)})" + + def __invert__(self) -> BooleanExpression: + """Transform the Expression into its negated version.""" + # De Morgan's law: not (A or B) = (not A) and (not B) + return And(~self.left, ~self.right) + + def __getnewargs__(self) -> Tuple[BooleanExpression, BooleanExpression]: + """Pickle the Or class.""" + return (self.left, self.right) + + +class Not(BooleanExpression): + """NOT operation expression - logical negation.""" + + child: BooleanExpression + + def __new__(cls, child: BooleanExpression) -> BooleanExpression: # type: ignore + if child is AlwaysTrue(): + return AlwaysFalse() + elif child is AlwaysFalse(): + return AlwaysTrue() + elif isinstance(child, Not): + return child.child + obj = super().__new__(cls) + obj.child = child + return obj + + def __repr__(self) -> str: + """Return the string representation of the Not class.""" + return f"Not(child={repr(self.child)})" + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the Not class.""" + return self.child == other.child if isinstance(other, Not) else False + + def __invert__(self) -> BooleanExpression: + """Transform the Expression into its negated version.""" + return self.child + + def __getnewargs__(self) -> Tuple[BooleanExpression]: + """Pickle the Not class.""" + return (self.child,) + + +class AlwaysTrue(BooleanExpression, Singleton): + """TRUE expression.""" + + def __invert__(self) -> AlwaysFalse: + """Transform the Expression into its negated version.""" + return AlwaysFalse() + + def __str__(self) -> str: + """Return the string representation of the AlwaysTrue class.""" + return "AlwaysTrue()" + + def __repr__(self) -> str: + """Return the string representation of the AlwaysTrue class.""" + return "AlwaysTrue()" + + +class AlwaysFalse(BooleanExpression, Singleton): + """FALSE expression.""" + + def __invert__(self) -> AlwaysTrue: + """Transform the Expression into its negated version.""" + return AlwaysTrue() + + def __str__(self) -> str: + """Return the string representation of the AlwaysFalse class.""" + return "AlwaysFalse()" + + def __repr__(self) -> str: + """Return the string representation of the AlwaysFalse class.""" + return "AlwaysFalse()" + + +class BoundPredicate(Generic[L], Bound, BooleanExpression, ABC): + term: BoundTerm[L] + + def __init__(self, term: BoundTerm[L]): + self.term = term + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the BoundPredicate class.""" + if isinstance(other, BoundPredicate): + return self.term == other.term + return False + + @property + @abstractmethod + def as_unbound(self) -> Type[UnboundPredicate[Any]]: + ... 
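[Aside for reviewers, not part of the patch: a minimal sketch of how the constructor-time simplifications in `__new__` and the `__invert__` methods above behave. It assumes only names exported by this module; the predicate classes used here (`EqualTo`, `IsNull`, and their negations) are defined further down in the file.]

    from pyiceberg.expressions import (
        AlwaysFalse,
        AlwaysTrue,
        And,
        EqualTo,
        IsNull,
        Not,
        NotEqualTo,
        NotNull,
        Or,
    )

    # And/Or short-circuit against the AlwaysTrue/AlwaysFalse singletons in __new__.
    assert And(EqualTo("city", "Drachten"), AlwaysTrue()) == EqualTo("city", "Drachten")
    assert Or(AlwaysFalse(), IsNull("city")) == IsNull("city")

    # Double negation collapses, and ~ applies De Morgan's laws recursively.
    assert Not(Not(IsNull("city"))) == IsNull("city")
    assert ~And(EqualTo("x", 5), IsNull("y")) == Or(NotEqualTo("x", 5), NotNull("y"))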
+ + +class UnboundPredicate(Generic[L], Unbound[BooleanExpression], BooleanExpression, ABC): + term: UnboundTerm[Any] + + def __init__(self, term: Union[str, UnboundTerm[Any]]): + self.term = _to_unbound_term(term) + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the UnboundPredicate class.""" + return self.term == other.term if isinstance(other, UnboundPredicate) else False + + @abstractmethod + def bind(self, schema: Schema, case_sensitive: bool = True) -> BooleanExpression: + ... + + @property + @abstractmethod + def as_bound(self) -> Type[BoundPredicate[L]]: + ... + + +class UnaryPredicate(UnboundPredicate[Any], ABC): + def bind(self, schema: Schema, case_sensitive: bool = True) -> BoundUnaryPredicate[Any]: + bound_term = self.term.bind(schema, case_sensitive) + return self.as_bound(bound_term) + + def __repr__(self) -> str: + """Return the string representation of the UnaryPredicate class.""" + return f"{str(self.__class__.__name__)}(term={repr(self.term)})" + + @property + @abstractmethod + def as_bound(self) -> Type[BoundUnaryPredicate[Any]]: + ... + + +class BoundUnaryPredicate(BoundPredicate[L], ABC): + def __repr__(self) -> str: + """Return the string representation of the BoundUnaryPredicate class.""" + return f"{str(self.__class__.__name__)}(term={repr(self.term)})" + + @property + @abstractmethod + def as_unbound(self) -> Type[UnaryPredicate]: + ... + + def __getnewargs__(self) -> Tuple[BoundTerm[L]]: + """Pickle the BoundUnaryPredicate class.""" + return (self.term,) + + +class BoundIsNull(BoundUnaryPredicate[L]): + def __new__(cls, term: BoundTerm[L]) -> BooleanExpression: # type: ignore # pylint: disable=W0221 + if term.ref().field.required: + return AlwaysFalse() + return super().__new__(cls) + + def __invert__(self) -> BoundNotNull[L]: + """Transform the Expression into its negated version.""" + return BoundNotNull(self.term) + + @property + def as_unbound(self) -> Type[IsNull]: + return IsNull + + +class BoundNotNull(BoundUnaryPredicate[L]): + def __new__(cls, term: BoundTerm[L]): # type: ignore # pylint: disable=W0221 + if term.ref().field.required: + return AlwaysTrue() + return super().__new__(cls) + + def __invert__(self) -> BoundIsNull[L]: + """Transform the Expression into its negated version.""" + return BoundIsNull(self.term) + + @property + def as_unbound(self) -> Type[NotNull]: + return NotNull + + +class IsNull(UnaryPredicate): + def __invert__(self) -> NotNull: + """Transform the Expression into its negated version.""" + return NotNull(self.term) + + @property + def as_bound(self) -> Type[BoundIsNull[L]]: + return BoundIsNull[L] + + +class NotNull(UnaryPredicate): + def __invert__(self) -> IsNull: + """Transform the Expression into its negated version.""" + return IsNull(self.term) + + @property + def as_bound(self) -> Type[BoundNotNull[L]]: + return BoundNotNull[L] + + +class BoundIsNaN(BoundUnaryPredicate[L]): + def __new__(cls, term: BoundTerm[L]) -> BooleanExpression: # type: ignore # pylint: disable=W0221 + bound_type = term.ref().field.field_type + if type(bound_type) in {FloatType, DoubleType}: + return super().__new__(cls) + return AlwaysFalse() + + def __invert__(self) -> BoundNotNaN[L]: + """Transform the Expression into its negated version.""" + return BoundNotNaN(self.term) + + @property + def as_unbound(self) -> Type[IsNaN]: + return IsNaN + + +class BoundNotNaN(BoundUnaryPredicate[L]): + def __new__(cls, term: BoundTerm[L]) -> BooleanExpression: # type: ignore # pylint: disable=W0221 + bound_type = 
term.ref().field.field_type + if type(bound_type) in {FloatType, DoubleType}: + return super().__new__(cls) + return AlwaysTrue() + + def __invert__(self) -> BoundIsNaN[L]: + """Transform the Expression into its negated version.""" + return BoundIsNaN(self.term) + + @property + def as_unbound(self) -> Type[NotNaN]: + return NotNaN + + +class IsNaN(UnaryPredicate): + def __invert__(self) -> NotNaN: + """Transform the Expression into its negated version.""" + return NotNaN(self.term) + + @property + def as_bound(self) -> Type[BoundIsNaN[L]]: + return BoundIsNaN[L] + + +class NotNaN(UnaryPredicate): + def __invert__(self) -> IsNaN: + """Transform the Expression into its negated version.""" + return IsNaN(self.term) + + @property + def as_bound(self) -> Type[BoundNotNaN[L]]: + return BoundNotNaN[L] + + +class SetPredicate(UnboundPredicate[L], ABC): + literals: Set[Literal[L]] + + def __init__(self, term: Union[str, UnboundTerm[Any]], literals: Union[Iterable[L], Iterable[Literal[L]]]): + super().__init__(term) + self.literals = _to_literal_set(literals) + + def bind(self, schema: Schema, case_sensitive: bool = True) -> BoundSetPredicate[L]: + bound_term = self.term.bind(schema, case_sensitive) + return self.as_bound(bound_term, {lit.to(bound_term.ref().field.field_type) for lit in self.literals}) + + def __str__(self) -> str: + """Return the string representation of the SetPredicate class.""" + # Sort to make it deterministic + return f"{str(self.__class__.__name__)}({str(self.term)}, {{{', '.join(sorted([str(literal) for literal in self.literals]))}}})" + + def __repr__(self) -> str: + """Return the string representation of the SetPredicate class.""" + # Sort to make it deterministic + return f"{str(self.__class__.__name__)}({repr(self.term)}, {{{', '.join(sorted([repr(literal) for literal in self.literals]))}}})" + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the SetPredicate class.""" + return self.term == other.term and self.literals == other.literals if isinstance(other, SetPredicate) else False + + def __getnewargs__(self) -> Tuple[UnboundTerm[L], Set[Literal[L]]]: + """Pickle the SetPredicate class.""" + return (self.term, self.literals) + + @property + @abstractmethod + def as_bound(self) -> Type[BoundSetPredicate[L]]: + return BoundSetPredicate[L] + + +class BoundSetPredicate(BoundPredicate[L], ABC): + literals: Set[Literal[L]] + + def __init__(self, term: BoundTerm[L], literals: Set[Literal[L]]): + # Since we don't know the type of BoundPredicate[L], we have to ignore this one + super().__init__(term) # type: ignore + self.literals = _to_literal_set(literals) # pylint: disable=W0621 + + @cached_property + def value_set(self) -> Set[L]: + return {lit.value for lit in self.literals} + + def __str__(self) -> str: + """Return the string representation of the BoundSetPredicate class.""" + # Sort to make it deterministic + return f"{str(self.__class__.__name__)}({str(self.term)}, {{{', '.join(sorted([str(literal) for literal in self.literals]))}}})" + + def __repr__(self) -> str: + """Return the string representation of the BoundSetPredicate class.""" + # Sort to make it deterministic + return f"{str(self.__class__.__name__)}({repr(self.term)}, {{{', '.join(sorted([repr(literal) for literal in self.literals]))}}})" + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the BoundSetPredicate class.""" + return self.term == other.term and self.literals == other.literals if isinstance(other, BoundSetPredicate) else False 
+
+    def __getnewargs__(self) -> Tuple[BoundTerm[L], Set[Literal[L]]]:
+        """Pickle the BoundSetPredicate class."""
+        return (self.term, self.literals)
+
+    @property
+    @abstractmethod
+    def as_unbound(self) -> Type[SetPredicate[L]]:
+        ...
+
+
+class BoundIn(BoundSetPredicate[L]):
+    def __new__(cls, term: BoundTerm[L], literals: Set[Literal[L]]) -> BooleanExpression:  # type: ignore  # pylint: disable=W0221
+        count = len(literals)
+        if count == 0:
+            return AlwaysFalse()
+        elif count == 1:
+            return BoundEqualTo(term, next(iter(literals)))
+        else:
+            return super().__new__(cls)
+
+    def __invert__(self) -> BoundNotIn[L]:
+        """Transform the Expression into its negated version."""
+        return BoundNotIn(self.term, self.literals)
+
+    def __eq__(self, other: Any) -> bool:
+        """Return the equality of two instances of the BoundIn class."""
+        return self.term == other.term and self.literals == other.literals if isinstance(other, BoundIn) else False
+
+    @property
+    def as_unbound(self) -> Type[In[L]]:
+        return In
+
+
+class BoundNotIn(BoundSetPredicate[L]):
+    def __new__(  # type: ignore  # pylint: disable=W0221
+        cls,
+        term: BoundTerm[L],
+        literals: Set[Literal[L]],
+    ) -> BooleanExpression:
+        count = len(literals)
+        if count == 0:
+            return AlwaysTrue()
+        elif count == 1:
+            return BoundNotEqualTo(term, next(iter(literals)))
+        else:
+            return super().__new__(cls)
+
+    def __invert__(self) -> BoundIn[L]:
+        """Transform the Expression into its negated version."""
+        return BoundIn(self.term, self.literals)
+
+    @property
+    def as_unbound(self) -> Type[NotIn[L]]:
+        return NotIn
+
+
+class In(SetPredicate[L]):
+    def __new__(  # type: ignore  # pylint: disable=W0221
+        cls, term: Union[str, UnboundTerm[Any]], literals: Union[Iterable[L], Iterable[Literal[L]]]
+    ) -> BooleanExpression:
+        literals_set: Set[Literal[L]] = _to_literal_set(literals)
+        count = len(literals_set)
+        if count == 0:
+            return AlwaysFalse()
+        elif count == 1:
+            # Take the single element from the materialized set; the incoming
+            # `literals` iterable may already be exhausted at this point.
+            return EqualTo(term, next(iter(literals_set)))
+        else:
+            return super().__new__(cls)
+
+    def __invert__(self) -> NotIn[L]:
+        """Transform the Expression into its negated version."""
+        return NotIn[L](self.term, self.literals)
+
+    @property
+    def as_bound(self) -> Type[BoundIn[L]]:
+        return BoundIn[L]
+
+
+class NotIn(SetPredicate[L], ABC):
+    def __new__(  # type: ignore  # pylint: disable=W0221
+        cls, term: Union[str, UnboundTerm[Any]], literals: Union[Iterable[L], Iterable[Literal[L]]]
+    ) -> BooleanExpression:
+        literals_set: Set[Literal[L]] = _to_literal_set(literals)
+        count = len(literals_set)
+        if count == 0:
+            return AlwaysTrue()
+        elif count == 1:
+            return NotEqualTo(term, next(iter(literals_set)))
+        else:
+            return super().__new__(cls)
+
+    def __invert__(self) -> In[L]:
+        """Transform the Expression into its negated version."""
+        return In[L](self.term, self.literals)
+
+    def __eq__(self, other: Any) -> bool:
+        """Return the equality of two instances of the NotIn class."""
+        if isinstance(other, NotIn):
+            return self.term == other.term and self.literals == other.literals
+        return False
+
+    @property
+    def as_bound(self) -> Type[BoundNotIn[L]]:
+        return BoundNotIn[L]
+
+
+class LiteralPredicate(UnboundPredicate[L], ABC):
+    literal: Literal[L]
+
+    def __init__(self, term: Union[str, UnboundTerm[Any]], literal: Union[L, Literal[L]]):  # pylint: disable=W0621
+        super().__init__(term)
+        self.literal = _to_literal(literal)  # pylint: disable=W0621
+
+    def bind(self, schema: Schema, case_sensitive: bool = True) -> BoundLiteralPredicate[L]:
+        bound_term = self.term.bind(schema,
case_sensitive) + lit = self.literal.to(bound_term.ref().field.field_type) + + if isinstance(lit, AboveMax): + if isinstance(self, (LessThan, LessThanOrEqual, NotEqualTo)): + return AlwaysTrue() # type: ignore + elif isinstance(self, (GreaterThan, GreaterThanOrEqual, EqualTo)): + return AlwaysFalse() # type: ignore + elif isinstance(lit, BelowMin): + if isinstance(self, (GreaterThan, GreaterThanOrEqual, NotEqualTo)): + return AlwaysTrue() # type: ignore + elif isinstance(self, (LessThan, LessThanOrEqual, EqualTo)): + return AlwaysFalse() # type: ignore + + return self.as_bound(bound_term, lit) + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the LiteralPredicate class.""" + if isinstance(other, LiteralPredicate): + return self.term == other.term and self.literal == other.literal + return False + + def __repr__(self) -> str: + """Return the string representation of the LiteralPredicate class.""" + return f"{str(self.__class__.__name__)}(term={repr(self.term)}, literal={repr(self.literal)})" + + @property + @abstractmethod + def as_bound(self) -> Type[BoundLiteralPredicate[L]]: + ... + + +class BoundLiteralPredicate(BoundPredicate[L], ABC): + literal: Literal[L] + + def __init__(self, term: BoundTerm[L], literal: Literal[L]): # pylint: disable=W0621 + # Since we don't know the type of BoundPredicate[L], we have to ignore this one + super().__init__(term) # type: ignore + self.literal = literal # pylint: disable=W0621 + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the BoundLiteralPredicate class.""" + if isinstance(other, BoundLiteralPredicate): + return self.term == other.term and self.literal == other.literal + return False + + def __repr__(self) -> str: + """Return the string representation of the BoundLiteralPredicate class.""" + return f"{str(self.__class__.__name__)}(term={repr(self.term)}, literal={repr(self.literal)})" + + @property + @abstractmethod + def as_unbound(self) -> Type[LiteralPredicate[L]]: + ... 
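[Aside, not part of the patch: an illustrative sketch of the `AboveMax`/`BelowMin` handling in `LiteralPredicate.bind` above. The `Schema`/`NestedField` construction mirrors the keyword style used elsewhere in this patch.]

    from pyiceberg.expressions import AlwaysFalse, AlwaysTrue, EqualTo, LessThan
    from pyiceberg.schema import Schema
    from pyiceberg.types import IntegerType, NestedField

    schema = Schema(
        NestedField(field_id=1, name="x", field_type=IntegerType(), required=True),
        schema_id=1,
    )

    # 2**31 overflows IntegerType, so the literal converts to IntAboveMax and
    # the predicate collapses at bind time instead of producing a bound predicate.
    assert EqualTo("x", 2**31).bind(schema) == AlwaysFalse()
    assert LessThan("x", 2**31).bind(schema) == AlwaysTrue()

    # An in-range literal binds to a BoundEqualTo on the resolved field.
    bound = EqualTo("x", 100).bind(schema)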
+ + +class BoundEqualTo(BoundLiteralPredicate[L]): + def __invert__(self) -> BoundNotEqualTo[L]: + """Transform the Expression into its negated version.""" + return BoundNotEqualTo[L](self.term, self.literal) + + @property + def as_unbound(self) -> Type[EqualTo[L]]: + return EqualTo + + +class BoundNotEqualTo(BoundLiteralPredicate[L]): + def __invert__(self) -> BoundEqualTo[L]: + """Transform the Expression into its negated version.""" + return BoundEqualTo[L](self.term, self.literal) + + @property + def as_unbound(self) -> Type[NotEqualTo[L]]: + return NotEqualTo + + +class BoundGreaterThanOrEqual(BoundLiteralPredicate[L]): + def __invert__(self) -> BoundLessThan[L]: + """Transform the Expression into its negated version.""" + return BoundLessThan[L](self.term, self.literal) + + @property + def as_unbound(self) -> Type[GreaterThanOrEqual[L]]: + return GreaterThanOrEqual[L] + + +class BoundGreaterThan(BoundLiteralPredicate[L]): + def __invert__(self) -> BoundLessThanOrEqual[L]: + """Transform the Expression into its negated version.""" + return BoundLessThanOrEqual(self.term, self.literal) + + @property + def as_unbound(self) -> Type[GreaterThan[L]]: + return GreaterThan[L] + + +class BoundLessThan(BoundLiteralPredicate[L]): + def __invert__(self) -> BoundGreaterThanOrEqual[L]: + """Transform the Expression into its negated version.""" + return BoundGreaterThanOrEqual[L](self.term, self.literal) + + @property + def as_unbound(self) -> Type[LessThan[L]]: + return LessThan[L] + + +class BoundLessThanOrEqual(BoundLiteralPredicate[L]): + def __invert__(self) -> BoundGreaterThan[L]: + """Transform the Expression into its negated version.""" + return BoundGreaterThan[L](self.term, self.literal) + + @property + def as_unbound(self) -> Type[LessThanOrEqual[L]]: + return LessThanOrEqual[L] + + +class BoundStartsWith(BoundLiteralPredicate[L]): + def __invert__(self) -> BoundNotStartsWith[L]: + """Transform the Expression into its negated version.""" + return BoundNotStartsWith[L](self.term, self.literal) + + @property + def as_unbound(self) -> Type[StartsWith[L]]: + return StartsWith[L] + + +class BoundNotStartsWith(BoundLiteralPredicate[L]): + def __invert__(self) -> BoundStartsWith[L]: + """Transform the Expression into its negated version.""" + return BoundStartsWith[L](self.term, self.literal) + + @property + def as_unbound(self) -> Type[NotStartsWith[L]]: + return NotStartsWith[L] + + +class EqualTo(LiteralPredicate[L]): + def __invert__(self) -> NotEqualTo[L]: + """Transform the Expression into its negated version.""" + return NotEqualTo[L](self.term, self.literal) + + @property + def as_bound(self) -> Type[BoundEqualTo[L]]: + return BoundEqualTo[L] + + +class NotEqualTo(LiteralPredicate[L]): + def __invert__(self) -> EqualTo[L]: + """Transform the Expression into its negated version.""" + return EqualTo[L](self.term, self.literal) + + @property + def as_bound(self) -> Type[BoundNotEqualTo[L]]: + return BoundNotEqualTo[L] + + +class LessThan(LiteralPredicate[L]): + def __invert__(self) -> GreaterThanOrEqual[L]: + """Transform the Expression into its negated version.""" + return GreaterThanOrEqual[L](self.term, self.literal) + + @property + def as_bound(self) -> Type[BoundLessThan[L]]: + return BoundLessThan[L] + + +class GreaterThanOrEqual(LiteralPredicate[L]): + def __invert__(self) -> LessThan[L]: + """Transform the Expression into its negated version.""" + return LessThan[L](self.term, self.literal) + + @property + def as_bound(self) -> Type[BoundGreaterThanOrEqual[L]]: + return 
BoundGreaterThanOrEqual[L]
+
+
+class GreaterThan(LiteralPredicate[L]):
+    def __invert__(self) -> LessThanOrEqual[L]:
+        """Transform the Expression into its negated version."""
+        return LessThanOrEqual[L](self.term, self.literal)
+
+    @property
+    def as_bound(self) -> Type[BoundGreaterThan[L]]:
+        return BoundGreaterThan[L]
+
+
+class LessThanOrEqual(LiteralPredicate[L]):
+    def __invert__(self) -> GreaterThan[L]:
+        """Transform the Expression into its negated version."""
+        return GreaterThan[L](self.term, self.literal)
+
+    @property
+    def as_bound(self) -> Type[BoundLessThanOrEqual[L]]:
+        return BoundLessThanOrEqual[L]
+
+
+class StartsWith(LiteralPredicate[L]):
+    def __invert__(self) -> NotStartsWith[L]:
+        """Transform the Expression into its negated version."""
+        return NotStartsWith[L](self.term, self.literal)
+
+    @property
+    def as_bound(self) -> Type[BoundStartsWith[L]]:
+        return BoundStartsWith[L]
+
+
+class NotStartsWith(LiteralPredicate[L]):
+    def __invert__(self) -> StartsWith[L]:
+        """Transform the Expression into its negated version."""
+        return StartsWith[L](self.term, self.literal)
+
+    @property
+    def as_bound(self) -> Type[BoundNotStartsWith[L]]:
+        return BoundNotStartsWith[L]
diff --git a/pyiceberg/expressions/literals.py b/pyiceberg/expressions/literals.py
new file mode 100644
index 0000000000..6d65058897
--- /dev/null
+++ b/pyiceberg/expressions/literals.py
@@ -0,0 +1,673 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
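[Aside, not part of the patch: to round off the expression API above, a short sketch of how the unbound comparison predicates invert into their duals rather than wrapping themselves in `Not`.]

    from pyiceberg.expressions import (
        GreaterThan,
        LessThanOrEqual,
        NotStartsWith,
        StartsWith,
    )

    # Each comparison knows its dual, so ~ never needs to allocate a Not node.
    assert isinstance(~GreaterThan("x", 5), LessThanOrEqual)
    # StartsWith and NotStartsWith invert into each other.
    assert isinstance(~StartsWith("s", "foo"), NotStartsWith)
    assert isinstance(~NotStartsWith("s", "foo"), StartsWith)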
+# pylint: disable=W0613 +from __future__ import annotations + +import struct +from abc import ABC, abstractmethod +from decimal import ROUND_HALF_UP, Decimal +from functools import singledispatchmethod +from math import isnan +from typing import Any, Generic, Type +from uuid import UUID + +from pyiceberg.typedef import L +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IcebergType, + IntegerType, + LongType, + StringType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) +from pyiceberg.utils.datetime import ( + date_str_to_days, + micros_to_days, + time_str_to_micros, + timestamp_to_micros, + timestamptz_to_micros, +) +from pyiceberg.utils.decimal import decimal_to_unscaled, unscaled_to_decimal +from pyiceberg.utils.singleton import Singleton + +UUID_BYTES_LENGTH = 16 + + +class Literal(Generic[L], ABC): + """Literal which has a value and can be converted between types.""" + + _value: L + + def __init__(self, value: L, value_type: Type[L]): + if value is None or not isinstance(value, value_type): + raise TypeError(f"Invalid literal value: {value!r} (not a {value_type})") + if isinstance(value, float) and isnan(value): + raise ValueError("Cannot create expression literal from NaN.") + self._value = value + + @property + def value(self) -> L: + return self._value + + @singledispatchmethod + @abstractmethod + def to(self, type_var: IcebergType) -> Literal[L]: + ... # pragma: no cover + + def __repr__(self) -> str: + """Return the string representation of the Literal class.""" + return f"{type(self).__name__}({self.value!r})" + + def __str__(self) -> str: + """Return the string representation of the Literal class.""" + return str(self.value) + + def __hash__(self) -> int: + """Return a hashed representation of the Literal class.""" + return hash(self.value) + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the Literal class.""" + if not isinstance(other, Literal): + return False + return self.value == other.value + + def __ne__(self, other: Any) -> bool: + """Return the inequality of two instances of the Literal class.""" + return not self.__eq__(other) + + def __lt__(self, other: Any) -> bool: + """Return if one instance of the Literal class is less than another instance.""" + return self.value < other.value + + def __gt__(self, other: Any) -> bool: + """Return if one instance of the Literal class is greater than another instance.""" + return self.value > other.value + + def __le__(self, other: Any) -> bool: + """Return if one instance of the Literal class is less than or equal to another instance.""" + return self.value <= other.value + + def __ge__(self, other: Any) -> bool: + """Return if one instance of the Literal class is greater than or equal to another instance.""" + return self.value >= other.value + + +def literal(value: L) -> Literal[L]: + """ + Construct an Iceberg Literal based on Python primitive data type. + + Args: + value (Python primitive type): the value to be associated with literal. + + Example: + from pyiceberg.expressions.literals import literal. 
>>> literal(123)
+        LongLiteral(123)
+    """
+    if isinstance(value, float):
+        return DoubleLiteral(value)  # type: ignore
+    elif isinstance(value, bool):
+        return BooleanLiteral(value)
+    elif isinstance(value, int):
+        return LongLiteral(value)
+    elif isinstance(value, str):
+        return StringLiteral(value)
+    elif isinstance(value, UUID):
+        return UUIDLiteral(value.bytes)  # type: ignore
+    elif isinstance(value, bytes):
+        return BinaryLiteral(value)
+    elif isinstance(value, Decimal):
+        return DecimalLiteral(value)
+    else:
+        raise TypeError(f"Invalid literal value: {repr(value)}")
+
+
+class AboveMax(Literal[L]):
+    def __repr__(self) -> str:
+        """Return the string representation of the AboveMax class."""
+        return f"{self.__class__.__name__}()"
+
+    def __str__(self) -> str:
+        """Return the string representation of the AboveMax class."""
+        return self.__class__.__name__
+
+
+class BelowMin(Literal[L]):
+    def __repr__(self) -> str:
+        """Return the string representation of the BelowMin class."""
+        return f"{self.__class__.__name__}()"
+
+    def __str__(self) -> str:
+        """Return the string representation of the BelowMin class."""
+        return self.__class__.__name__
+
+
+class FloatAboveMax(AboveMax[float], Singleton):
+    def __init__(self) -> None:
+        super().__init__(FloatType.max, float)
+
+    @singledispatchmethod
+    def to(self, type_var: IcebergType) -> Literal:  # type: ignore
+        raise TypeError("Cannot change the type of FloatAboveMax")
+
+    @to.register(FloatType)
+    def _(self, _: FloatType) -> Literal[float]:
+        return self
+
+
+class FloatBelowMin(BelowMin[float], Singleton):
+    def __init__(self) -> None:
+        super().__init__(FloatType.min, float)
+
+    @singledispatchmethod
+    def to(self, type_var: IcebergType) -> Literal:  # type: ignore
+        raise TypeError("Cannot change the type of FloatBelowMin")
+
+    @to.register(FloatType)
+    def _(self, _: FloatType) -> Literal[float]:
+        return self
+
+
+class IntAboveMax(AboveMax[int], Singleton):
+    def __init__(self) -> None:
+        super().__init__(IntegerType.max, int)
+
+    @singledispatchmethod
+    def to(self, type_var: IcebergType) -> Literal:  # type: ignore
+        raise TypeError("Cannot change the type of IntAboveMax")
+
+    @to.register(IntegerType)
+    def _(self, _: IntegerType) -> Literal[int]:
+        return self
+
+
+class IntBelowMin(BelowMin[int], Singleton):
+    def __init__(self) -> None:
+        super().__init__(IntegerType.min, int)
+
+    @singledispatchmethod
+    def to(self, type_var: IcebergType) -> Literal:  # type: ignore
+        raise TypeError("Cannot change the type of IntBelowMin")
+
+    @to.register(IntegerType)
+    def _(self, _: IntegerType) -> Literal[int]:
+        return self
+
+
+class LongAboveMax(AboveMax[int], Singleton):
+    def __init__(self) -> None:
+        super().__init__(LongType.max, int)
+
+    @singledispatchmethod
+    def to(self, type_var: IcebergType) -> Literal:  # type: ignore
+        raise TypeError("Cannot change the type of LongAboveMax")
+
+    @to.register(LongType)
+    def _(self, _: LongType) -> Literal[int]:
+        return self
+
+
+class LongBelowMin(BelowMin[int], Singleton):
+    def __init__(self) -> None:
+        super().__init__(LongType.min, int)
+
+    @singledispatchmethod
+    def to(self, type_var: IcebergType) -> Literal:  # type: ignore
+        raise TypeError("Cannot change the type of LongBelowMin")
+
+    @to.register(LongType)
+    def _(self, _: LongType) -> Literal[int]:
+        return self
+
+
+class BooleanLiteral(Literal[bool]):
+    def __init__(self, value: bool) -> None:
+        super().__init__(value, bool)
+
+    @singledispatchmethod
+    def to(self, type_var: IcebergType) -> Literal[bool]:  # type: ignore
+        raise
TypeError(f"Cannot convert BooleanLiteral into {type_var}") + + @to.register(BooleanType) + def _(self, _: BooleanType) -> Literal[bool]: + return self + + +class LongLiteral(Literal[int]): + def __init__(self, value: int) -> None: + super().__init__(value, int) + + @singledispatchmethod + def to(self, type_var: IcebergType) -> Literal: # type: ignore + raise TypeError(f"Cannot convert LongLiteral into {type_var}") + + def increment(self) -> Literal[int]: + return LongLiteral(self.value + 1) + + def decrement(self) -> Literal[int]: + return LongLiteral(self.value - 1) + + @to.register(LongType) + def _(self, _: LongType) -> Literal[int]: + if LongType.max < self.value: + return LongAboveMax() + elif LongType.min > self.value: + return LongBelowMin() + else: + return self + + @to.register(IntegerType) + def _(self, _: IntegerType) -> Literal[int]: + if IntegerType.max < self.value: + return IntAboveMax() + elif IntegerType.min > self.value: + return IntBelowMin() + return self + + @to.register(FloatType) + def _(self, _: FloatType) -> Literal[float]: + return FloatLiteral(float(self.value)) + + @to.register(DoubleType) + def _(self, _: DoubleType) -> Literal[float]: + return DoubleLiteral(float(self.value)) + + @to.register(DateType) + def _(self, _: DateType) -> Literal[int]: + return DateLiteral(self.value) + + @to.register(TimeType) + def _(self, _: TimeType) -> Literal[int]: + return TimeLiteral(self.value) + + @to.register(TimestampType) + def _(self, _: TimestampType) -> Literal[int]: + return TimestampLiteral(self.value) + + @to.register(DecimalType) + def _(self, type_var: DecimalType) -> Literal[Decimal]: + unscaled = Decimal(self.value) + if type_var.scale == 0: + return DecimalLiteral(unscaled) + else: + sign, digits, _ = unscaled.as_tuple() + zeros = (0,) * type_var.scale + return DecimalLiteral(Decimal((sign, digits + zeros, -type_var.scale))) + + +class FloatLiteral(Literal[float]): + def __init__(self, value: float) -> None: + super().__init__(value, float) + self._value32 = struct.unpack(" bool: + """Return the equality of two instances of the FloatLiteral class.""" + return self._value32 == other + + def __lt__(self, other: Any) -> bool: + """Return if one instance of the FloatLiteral class is less than another instance.""" + return self._value32 < other + + def __gt__(self, other: Any) -> bool: + """Return if one instance of the FloatLiteral class is greater than another instance.""" + return self._value32 > other + + def __le__(self, other: Any) -> bool: + """Return if one instance of the FloatLiteral class is less than or equal to another instance.""" + return self._value32 <= other + + def __ge__(self, other: Any) -> bool: + """Return if one instance of the FloatLiteral class is greater than or equal to another instance.""" + return self._value32 >= other + + def __hash__(self) -> int: + """Return a hashed representation of the FloatLiteral class.""" + return hash(self._value32) + + @singledispatchmethod + def to(self, type_var: IcebergType) -> Literal: # type: ignore + raise TypeError(f"Cannot convert FloatLiteral into {type_var}") + + @to.register(FloatType) + def _(self, _: FloatType) -> Literal[float]: + return self + + @to.register(DoubleType) + def _(self, _: DoubleType) -> Literal[float]: + return DoubleLiteral(self.value) + + @to.register(DecimalType) + def _(self, type_var: DecimalType) -> Literal[Decimal]: + return DecimalLiteral(Decimal(self.value).quantize(Decimal((0, (1,), -type_var.scale)), rounding=ROUND_HALF_UP)) + + +class DoubleLiteral(Literal[float]): 
+ def __init__(self, value: float) -> None: + super().__init__(value, float) + + @singledispatchmethod + def to(self, type_var: IcebergType) -> Literal: # type: ignore + raise TypeError(f"Cannot convert DoubleLiteral into {type_var}") + + @to.register(DoubleType) + def _(self, _: DoubleType) -> Literal[float]: + return self + + @to.register(FloatType) + def _(self, _: FloatType) -> Literal[float]: + if FloatType.max < self.value: + return FloatAboveMax() + elif FloatType.min > self.value: + return FloatBelowMin() + return FloatLiteral(self.value) + + @to.register(DecimalType) + def _(self, type_var: DecimalType) -> Literal[Decimal]: + return DecimalLiteral(Decimal(self.value).quantize(Decimal((0, (1,), -type_var.scale)), rounding=ROUND_HALF_UP)) + + +class DateLiteral(Literal[int]): + def __init__(self, value: int) -> None: + super().__init__(value, int) + + def increment(self) -> Literal[int]: + return DateLiteral(self.value + 1) + + def decrement(self) -> Literal[int]: + return DateLiteral(self.value - 1) + + @singledispatchmethod + def to(self, type_var: IcebergType) -> Literal: # type: ignore + raise TypeError(f"Cannot convert DateLiteral into {type_var}") + + @to.register(DateType) + def _(self, _: DateType) -> Literal[int]: + return self + + +class TimeLiteral(Literal[int]): + def __init__(self, value: int) -> None: + super().__init__(value, int) + + @singledispatchmethod + def to(self, type_var: IcebergType) -> Literal: # type: ignore + raise TypeError(f"Cannot convert TimeLiteral into {type_var}") + + @to.register(TimeType) + def _(self, _: TimeType) -> Literal[int]: + return self + + +class TimestampLiteral(Literal[int]): + def __init__(self, value: int) -> None: + super().__init__(value, int) + + def increment(self) -> Literal[int]: + return TimestampLiteral(self.value + 1) + + def decrement(self) -> Literal[int]: + return TimestampLiteral(self.value - 1) + + @singledispatchmethod + def to(self, type_var: IcebergType) -> Literal: # type: ignore + raise TypeError(f"Cannot convert TimestampLiteral into {type_var}") + + @to.register(TimestampType) + def _(self, _: TimestampType) -> Literal[int]: + return self + + @to.register(TimestamptzType) + def _(self, _: TimestamptzType) -> Literal[int]: + return self + + @to.register(DateType) + def _(self, _: DateType) -> Literal[int]: + return DateLiteral(micros_to_days(self.value)) + + +class DecimalLiteral(Literal[Decimal]): + def __init__(self, value: Decimal) -> None: + super().__init__(value, Decimal) + + def increment(self) -> Literal[Decimal]: + original_scale = abs(int(self.value.as_tuple().exponent)) + unscaled = decimal_to_unscaled(self.value) + return DecimalLiteral(unscaled_to_decimal(unscaled + 1, original_scale)) + + def decrement(self) -> Literal[Decimal]: + original_scale = abs(int(self.value.as_tuple().exponent)) + unscaled = decimal_to_unscaled(self.value) + return DecimalLiteral(unscaled_to_decimal(unscaled - 1, original_scale)) + + @singledispatchmethod + def to(self, type_var: IcebergType) -> Literal: # type: ignore + raise TypeError(f"Cannot convert DecimalLiteral into {type_var}") + + @to.register(DecimalType) + def _(self, type_var: DecimalType) -> Literal[Decimal]: + if type_var.scale == abs(int(self.value.as_tuple().exponent)): + return self + raise ValueError(f"Could not convert {self.value} into a {type_var}") + + @to.register(IntegerType) + def _(self, _: IntegerType) -> Literal[int]: + value_int = int(self.value.to_integral_value()) + if value_int > IntegerType.max: + return IntAboveMax() + elif value_int < 
IntegerType.min:
+            return IntBelowMin()
+        else:
+            return LongLiteral(value_int)
+
+    @to.register(LongType)
+    def _(self, _: LongType) -> Literal[int]:
+        value_int = int(self.value.to_integral_value())
+        if value_int > LongType.max:
+            return LongAboveMax()
+        elif value_int < LongType.min:
+            return LongBelowMin()
+        else:
+            return LongLiteral(value_int)
+
+    @to.register(FloatType)
+    def _(self, _: FloatType) -> Literal[float]:
+        value_float = float(self.value)
+        if value_float > FloatType.max:
+            return FloatAboveMax()
+        elif value_float < FloatType.min:
+            return FloatBelowMin()
+        else:
+            return FloatLiteral(value_float)
+
+    @to.register(DoubleType)
+    def _(self, _: DoubleType) -> Literal[float]:
+        return DoubleLiteral(float(self.value))
+
+
+class StringLiteral(Literal[str]):
+    def __init__(self, value: str) -> None:
+        super().__init__(value, str)
+
+    @singledispatchmethod
+    def to(self, type_var: IcebergType) -> Literal:  # type: ignore
+        raise TypeError(f"Cannot convert StringLiteral into {type_var}")
+
+    @to.register(StringType)
+    def _(self, _: StringType) -> Literal[str]:
+        return self
+
+    @to.register(IntegerType)
+    def _(self, type_var: IntegerType) -> Literal[int]:
+        try:
+            number = int(float(self.value))
+
+            if IntegerType.max < number:
+                return IntAboveMax()
+            elif IntegerType.min > number:
+                return IntBelowMin()
+            return LongLiteral(number)
+        except ValueError as e:
+            raise ValueError(f"Could not convert {self.value} into a {type_var}") from e
+
+    @to.register(LongType)
+    def _(self, type_var: LongType) -> Literal[int]:
+        try:
+            long_value = int(float(self.value))
+            if LongType.max < long_value:
+                return LongAboveMax()
+            elif LongType.min > long_value:
+                return LongBelowMin()
+            else:
+                return LongLiteral(long_value)
+        except (TypeError, ValueError) as e:
+            raise ValueError(f"Could not convert {self.value} into a {type_var}") from e
+
+    @to.register(DateType)
+    def _(self, type_var: DateType) -> Literal[int]:
+        try:
+            return DateLiteral(date_str_to_days(self.value))
+        except (TypeError, ValueError) as e:
+            raise ValueError(f"Could not convert {self.value} into a {type_var}") from e
+
+    @to.register(TimeType)
+    def _(self, type_var: TimeType) -> Literal[int]:
+        try:
+            return TimeLiteral(time_str_to_micros(self.value))
+        except (TypeError, ValueError) as e:
+            raise ValueError(f"Could not convert {self.value} into a {type_var}") from e
+
+    @to.register(TimestampType)
+    def _(self, _: TimestampType) -> Literal[int]:
+        return TimestampLiteral(timestamp_to_micros(self.value))
+
+    @to.register(TimestamptzType)
+    def _(self, _: TimestamptzType) -> Literal[int]:
+        return TimestampLiteral(timestamptz_to_micros(self.value))
+
+    @to.register(UUIDType)
+    def _(self, _: UUIDType) -> Literal[bytes]:
+        return UUIDLiteral(UUID(self.value).bytes)
+
+    @to.register(DecimalType)
+    def _(self, type_var: DecimalType) -> Literal[Decimal]:
+        dec = Decimal(self.value)
+        scale = abs(int(dec.as_tuple().exponent))
+        if type_var.scale == scale:
+            return DecimalLiteral(dec)
+        else:
+            raise ValueError(f"Could not convert {self.value} into a {type_var}, scales differ {type_var.scale} <> {scale}")
+
+    @to.register(BooleanType)
+    def _(self, type_var: BooleanType) -> Literal[bool]:
+        value_upper = self.value.upper()
+        if value_upper in ["TRUE", "FALSE"]:
+            return BooleanLiteral(value_upper == "TRUE")
+        else:
+            raise ValueError(f"Could not convert {self.value} into a {type_var}")
+
+    def __repr__(self) -> str:
+        """Return the string representation of the StringLiteral class."""
+        return f"literal({repr(self.value)})"
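[Aside, not part of the patch: a sketch of the single-dispatch `to` conversions above, showing how `StringLiteral` promotes into the typed literals.]

    from pyiceberg.expressions.literals import LongLiteral, literal
    from pyiceberg.types import BooleanType, DateType, DecimalType, LongType

    # Strings parse into the typed literal that the target Iceberg type expects.
    assert literal("123").to(LongType()) == LongLiteral(123)
    assert literal("true").to(BooleanType()).value is True
    date_lit = literal("2017-11-16").to(DateType())  # DateLiteral, days since epoch

    # Scale must match exactly: '34.56' has scale 2, so DecimalType(10, 2) works,
    # while a mismatched scale raises ValueError instead of silently rounding.
    dec_lit = literal("34.56").to(DecimalType(10, 2))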
+ + +class UUIDLiteral(Literal[bytes]): + def __init__(self, value: bytes) -> None: + super().__init__(value, bytes) + + @singledispatchmethod + def to(self, type_var: IcebergType) -> Literal: # type: ignore + raise TypeError(f"Cannot convert UUIDLiteral into {type_var}") + + @to.register(UUIDType) + def _(self, _: UUIDType) -> Literal[bytes]: + return self + + +class FixedLiteral(Literal[bytes]): + def __init__(self, value: bytes) -> None: + super().__init__(value, bytes) + + @singledispatchmethod + def to(self, type_var: IcebergType) -> Literal: # type: ignore + raise TypeError(f"Cannot convert FixedLiteral into {type_var}") + + @to.register(FixedType) + def _(self, type_var: FixedType) -> Literal[bytes]: + if len(self.value) == len(type_var): + return self + else: + raise ValueError( + f"Could not convert {self.value!r} into a {type_var}, lengths differ {len(self.value)} <> {len(type_var)}" + ) + + @to.register(BinaryType) + def _(self, _: BinaryType) -> Literal[bytes]: + return BinaryLiteral(self.value) + + @to.register(UUIDType) + def _(self, type_var: UUIDType) -> Literal[bytes]: + if len(self.value) == UUID_BYTES_LENGTH: + return UUIDLiteral(self.value) + else: + raise TypeError( + f"Could not convert {self.value!r} into a {type_var}, lengths differ {len(self.value)} <> {UUID_BYTES_LENGTH}" + ) + + +class BinaryLiteral(Literal[bytes]): + def __init__(self, value: bytes) -> None: + super().__init__(value, bytes) + + @singledispatchmethod + def to(self, type_var: IcebergType) -> Literal: # type: ignore + raise TypeError(f"Cannot convert BinaryLiteral into {type_var}") + + @to.register(BinaryType) + def _(self, _: BinaryType) -> Literal[bytes]: + return self + + @to.register(FixedType) + def _(self, type_var: FixedType) -> Literal[bytes]: + if len(type_var) == len(self.value): + return FixedLiteral(self.value) + else: + raise TypeError( + f"Cannot convert BinaryLiteral into {type_var}, different length: {len(type_var)} <> {len(self.value)}" + ) + + @to.register(UUIDType) + def _(self, type_var: UUIDType) -> Literal[bytes]: + if len(self.value) == UUID_BYTES_LENGTH: + return UUIDLiteral(self.value) + else: + raise TypeError( + f"Cannot convert BinaryLiteral into {type_var}, different length: {UUID_BYTES_LENGTH} <> {len(self.value)}" + ) diff --git a/pyiceberg/expressions/parser.py b/pyiceberg/expressions/parser.py new file mode 100644 index 0000000000..d6d5bdb794 --- /dev/null +++ b/pyiceberg/expressions/parser.py @@ -0,0 +1,255 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
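[Aside, not part of the patch: the parser module that follows builds a small SQL-like predicate grammar on top of pyparsing. As a usage sketch, the `parse` entry point defined at the end of the module turns a string into the unbound expressions from `pyiceberg.expressions`.]

    from pyiceberg.expressions import And, EqualTo, In, NotNull
    from pyiceberg.expressions.parser import parse

    # Comparisons, IN-lists, and null checks combine with and/or/not keywords;
    # `and` binds tighter than `or`, and parentheses group as usual.
    expr = parse("city = 'Amsterdam' and zip is not null")
    assert expr == And(EqualTo("city", "Amsterdam"), NotNull("zip"))

    assert parse("x in (1, 2, 3)") == In("x", {1, 2, 3})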
+from decimal import Decimal + +from pyparsing import ( + CaselessKeyword, + DelimitedList, + Group, + ParserElement, + ParseResults, + Suppress, + Word, + alphanums, + alphas, + infix_notation, + one_of, + opAssoc, + sgl_quoted_string, +) +from pyparsing.common import pyparsing_common as common + +from pyiceberg.expressions import ( + AlwaysFalse, + AlwaysTrue, + And, + BooleanExpression, + EqualTo, + GreaterThan, + GreaterThanOrEqual, + In, + IsNaN, + IsNull, + LessThan, + LessThanOrEqual, + Not, + NotEqualTo, + NotIn, + NotNaN, + NotNull, + NotStartsWith, + Or, + Reference, + StartsWith, +) +from pyiceberg.expressions.literals import ( + DecimalLiteral, + Literal, + LongLiteral, + StringLiteral, +) +from pyiceberg.typedef import L + +ParserElement.enablePackrat() + +AND = CaselessKeyword("and") +OR = CaselessKeyword("or") +NOT = CaselessKeyword("not") +IS = CaselessKeyword("is") +IN = CaselessKeyword("in") +NULL = CaselessKeyword("null") +NAN = CaselessKeyword("nan") +LIKE = CaselessKeyword("like") + +identifier = Word(alphas, alphanums + "_$").set_results_name("identifier") +column = DelimitedList(identifier, delim=".", combine=False).set_results_name("column") + + +@column.set_parse_action +def _(result: ParseResults) -> Reference: + return Reference(result.column[-1]) + + +boolean = one_of(["true", "false"], caseless=True).set_results_name("boolean") +string = sgl_quoted_string.set_results_name("raw_quoted_string") +decimal = common.real().set_results_name("decimal") +integer = common.signed_integer().set_results_name("integer") +literal = Group(string | decimal | integer).set_results_name("literal") +literal_set = Group(DelimitedList(string) | DelimitedList(decimal) | DelimitedList(integer)).set_results_name("literal_set") + + +@boolean.set_parse_action +def _(result: ParseResults) -> BooleanExpression: + if "true" == result.boolean.lower(): + return AlwaysTrue() + else: + return AlwaysFalse() + + +@string.set_parse_action +def _(result: ParseResults) -> Literal[str]: + return StringLiteral(result.raw_quoted_string[1:-1].replace("''", "'")) + + +@decimal.set_parse_action +def _(result: ParseResults) -> Literal[Decimal]: + return DecimalLiteral(Decimal(result.decimal)) + + +@integer.set_parse_action +def _(result: ParseResults) -> Literal[int]: + return LongLiteral(int(result.integer)) + + +@literal.set_parse_action +def _(result: ParseResults) -> Literal[L]: + return result[0][0] + + +@literal_set.set_parse_action +def _(result: ParseResults) -> Literal[L]: + return result[0] + + +comparison_op = one_of(["<", "<=", ">", ">=", "=", "==", "!=", "<>"], caseless=True).set_results_name("op") +left_ref = column + comparison_op + literal +right_ref = literal + comparison_op + column +comparison = left_ref | right_ref + + +@left_ref.set_parse_action +def _(result: ParseResults) -> BooleanExpression: + if result.op == "<": + return LessThan(result.column, result.literal) + elif result.op == "<=": + return LessThanOrEqual(result.column, result.literal) + elif result.op == ">": + return GreaterThan(result.column, result.literal) + elif result.op == ">=": + return GreaterThanOrEqual(result.column, result.literal) + if result.op in ("=", "=="): + return EqualTo(result.column, result.literal) + if result.op in ("!=", "<>"): + return NotEqualTo(result.column, result.literal) + raise ValueError(f"Unsupported operation type: {result.op}") + + +@right_ref.set_parse_action +def _(result: ParseResults) -> BooleanExpression: + if result.op == "<": + return GreaterThan(result.column, result.literal) + 
elif result.op == "<=": + return GreaterThanOrEqual(result.column, result.literal) + elif result.op == ">": + return LessThan(result.column, result.literal) + elif result.op == ">=": + return LessThanOrEqual(result.column, result.literal) + elif result.op in ("=", "=="): + return EqualTo(result.column, result.literal) + elif result.op in ("!=", "<>"): + return NotEqualTo(result.column, result.literal) + raise ValueError(f"Unsupported operation type: {result.op}") + + +is_null = column + IS + NULL +not_null = column + IS + NOT + NULL +null_check = is_null | not_null + + +@is_null.set_parse_action +def _(result: ParseResults) -> BooleanExpression: + return IsNull(result.column) + + +@not_null.set_parse_action +def _(result: ParseResults) -> BooleanExpression: + return NotNull(result.column) + + +is_nan = column + IS + NAN +not_nan = column + IS + NOT + NAN +nan_check = is_nan | not_nan + + +@is_nan.set_parse_action +def _(result: ParseResults) -> BooleanExpression: + return IsNaN(result.column) + + +@not_nan.set_parse_action +def _(result: ParseResults) -> BooleanExpression: + return NotNaN(result.column) + + +is_in = column + IN + "(" + literal_set + ")" +not_in = column + NOT + IN + "(" + literal_set + ")" +in_check = is_in | not_in + + +@is_in.set_parse_action +def _(result: ParseResults) -> BooleanExpression: + return In(result.column, result.literal_set) + + +@not_in.set_parse_action +def _(result: ParseResults) -> BooleanExpression: + return NotIn(result.column, result.literal_set) + + +starts_with = column + LIKE + string +not_starts_with = column + NOT + LIKE + string +starts_check = starts_with | not_starts_with + + +@starts_with.set_parse_action +def _(result: ParseResults) -> BooleanExpression: + return StartsWith(result.column, result.raw_quoted_string) + + +@not_starts_with.set_parse_action +def _(result: ParseResults) -> BooleanExpression: + return NotStartsWith(result.column, result.raw_quoted_string) + + +predicate = (comparison | in_check | null_check | nan_check | starts_check | boolean).set_results_name("predicate") + + +def handle_not(result: ParseResults) -> Not: + return Not(result[0][0]) + + +def handle_and(result: ParseResults) -> And: + return And(result[0][0], result[0][1]) + + +def handle_or(result: ParseResults) -> Or: + return Or(result[0][0], result[0][1]) + + +boolean_expression = infix_notation( + predicate, + [ + (Suppress(NOT), 1, opAssoc.RIGHT, handle_not), + (Suppress(AND), 2, opAssoc.LEFT, handle_and), + (Suppress(OR), 2, opAssoc.LEFT, handle_or), + ], +).set_name("expr") + + +def parse(expr: str) -> BooleanExpression: + """Parse a boolean expression.""" + return boolean_expression.parse_string(expr)[0] diff --git a/pyiceberg/expressions/visitors.py b/pyiceberg/expressions/visitors.py new file mode 100644 index 0000000000..a4f311fdd3 --- /dev/null +++ b/pyiceberg/expressions/visitors.py @@ -0,0 +1,1419 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import math
+from abc import ABC, abstractmethod
+from functools import singledispatch
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Generic,
+    List,
+    Set,
+    Tuple,
+    TypeVar,
+    Union,
+)
+
+from pyiceberg.conversions import from_bytes
+from pyiceberg.expressions import (
+    AlwaysFalse,
+    AlwaysTrue,
+    And,
+    BooleanExpression,
+    BoundEqualTo,
+    BoundGreaterThan,
+    BoundGreaterThanOrEqual,
+    BoundIn,
+    BoundIsNaN,
+    BoundIsNull,
+    BoundLessThan,
+    BoundLessThanOrEqual,
+    BoundLiteralPredicate,
+    BoundNotEqualTo,
+    BoundNotIn,
+    BoundNotNaN,
+    BoundNotNull,
+    BoundNotStartsWith,
+    BoundPredicate,
+    BoundSetPredicate,
+    BoundStartsWith,
+    BoundTerm,
+    BoundUnaryPredicate,
+    L,
+    Not,
+    Or,
+    UnboundPredicate,
+)
+from pyiceberg.expressions.literals import Literal
+from pyiceberg.manifest import DataFile, ManifestFile, PartitionFieldSummary
+from pyiceberg.partitioning import PartitionSpec
+from pyiceberg.schema import Schema
+from pyiceberg.typedef import EMPTY_DICT, StructProtocol
+from pyiceberg.types import (
+    DoubleType,
+    FloatType,
+    IcebergType,
+    PrimitiveType,
+    StructType,
+    TimestampType,
+    TimestamptzType,
+)
+from pyiceberg.utils.datetime import micros_to_timestamp, micros_to_timestamptz
+
+T = TypeVar("T")
+
+
+class BooleanExpressionVisitor(Generic[T], ABC):
+    @abstractmethod
+    def visit_true(self) -> T:
+        """Visit method for an AlwaysTrue boolean expression.
+
+        Note: This visit method has no arguments since AlwaysTrue instances have no context.
+        """
+
+    @abstractmethod
+    def visit_false(self) -> T:
+        """Visit method for an AlwaysFalse boolean expression.
+
+        Note: This visit method has no arguments since AlwaysFalse instances have no context.
+        """
+
+    @abstractmethod
+    def visit_not(self, child_result: T) -> T:
+        """Visit method for a Not boolean expression.
+
+        Args:
+            child_result (T): The result of visiting the child of the Not boolean expression.
+        """
+
+    @abstractmethod
+    def visit_and(self, left_result: T, right_result: T) -> T:
+        """Visit method for an And boolean expression.
+
+        Args:
+            left_result (T): The result of visiting the left side of the expression.
+            right_result (T): The result of visiting the right side of the expression.
+        """
+
+    @abstractmethod
+    def visit_or(self, left_result: T, right_result: T) -> T:
+        """Visit method for an Or boolean expression.
+
+        Args:
+            left_result (T): The result of visiting the left side of the expression.
+            right_result (T): The result of visiting the right side of the expression.
+        """
+
+    @abstractmethod
+    def visit_unbound_predicate(self, predicate: UnboundPredicate[L]) -> T:
+        """Visit method for an unbound predicate in an expression tree.
+
+        Args:
+            predicate (UnboundPredicate[L]): An instance of an UnboundPredicate.
+        """
+
+    @abstractmethod
+    def visit_bound_predicate(self, predicate: BoundPredicate[L]) -> T:
+        """Visit method for a bound predicate in an expression tree.
+
+        Args:
+            predicate (BoundPredicate[L]): An instance of a BoundPredicate.
+ """ + + +@singledispatch +def visit(obj: BooleanExpression, visitor: BooleanExpressionVisitor[T]) -> T: + """Apply a boolean expression visitor to any point within an expression. + + The function traverses the expression in post-order fashion. + + Args: + obj (BooleanExpression): An instance of a BooleanExpression. + visitor (BooleanExpressionVisitor[T]): An instance of an implementation of the generic BooleanExpressionVisitor base class. + + Raises: + NotImplementedError: If attempting to visit an unsupported expression. + """ + raise NotImplementedError(f"Cannot visit unsupported expression: {obj}") + + +@visit.register(AlwaysTrue) +def _(_: AlwaysTrue, visitor: BooleanExpressionVisitor[T]) -> T: + """Visit an AlwaysTrue boolean expression with a concrete BooleanExpressionVisitor.""" + return visitor.visit_true() + + +@visit.register(AlwaysFalse) +def _(_: AlwaysFalse, visitor: BooleanExpressionVisitor[T]) -> T: + """Visit an AlwaysFalse boolean expression with a concrete BooleanExpressionVisitor.""" + return visitor.visit_false() + + +@visit.register(Not) +def _(obj: Not, visitor: BooleanExpressionVisitor[T]) -> T: + """Visit a Not boolean expression with a concrete BooleanExpressionVisitor.""" + child_result: T = visit(obj.child, visitor=visitor) + return visitor.visit_not(child_result=child_result) + + +@visit.register(And) +def _(obj: And, visitor: BooleanExpressionVisitor[T]) -> T: + """Visit an And boolean expression with a concrete BooleanExpressionVisitor.""" + left_result: T = visit(obj.left, visitor=visitor) + right_result: T = visit(obj.right, visitor=visitor) + return visitor.visit_and(left_result=left_result, right_result=right_result) + + +@visit.register(UnboundPredicate) +def _(obj: UnboundPredicate[L], visitor: BooleanExpressionVisitor[T]) -> T: + """Visit an unbound boolean expression with a concrete BooleanExpressionVisitor.""" + return visitor.visit_unbound_predicate(predicate=obj) + + +@visit.register(BoundPredicate) +def _(obj: BoundPredicate[L], visitor: BooleanExpressionVisitor[T]) -> T: + """Visit a bound boolean expression with a concrete BooleanExpressionVisitor.""" + return visitor.visit_bound_predicate(predicate=obj) + + +@visit.register(Or) +def _(obj: Or, visitor: BooleanExpressionVisitor[T]) -> T: + """Visit an Or boolean expression with a concrete BooleanExpressionVisitor.""" + left_result: T = visit(obj.left, visitor=visitor) + right_result: T = visit(obj.right, visitor=visitor) + return visitor.visit_or(left_result=left_result, right_result=right_result) + + +def bind(schema: Schema, expression: BooleanExpression, case_sensitive: bool) -> BooleanExpression: + """Travers over an expression to bind the predicates to the schema. + + Args: + schema (Schema): A schema to use when binding the expression. + expression (BooleanExpression): An expression containing UnboundPredicates that can be bound. + case_sensitive (bool): Whether to consider case when binding a reference to a field in a schema, defaults to True. + + Raises: + TypeError: In the case a predicate is already bound. + """ + return visit(expression, BindVisitor(schema, case_sensitive)) + + +class BindVisitor(BooleanExpressionVisitor[BooleanExpression]): + """Rewrites a boolean expression by replacing unbound references with references to fields in a struct schema. + + Args: + schema (Schema): A schema to use when binding the expression. + case_sensitive (bool): Whether to consider case when binding a reference to a field in a schema, defaults to True. 
+ + Raises: + TypeError: In the case a predicate is already bound. + """ + + schema: Schema + case_sensitive: bool + + def __init__(self, schema: Schema, case_sensitive: bool) -> None: + self.schema = schema + self.case_sensitive = case_sensitive + + def visit_true(self) -> BooleanExpression: + return AlwaysTrue() + + def visit_false(self) -> BooleanExpression: + return AlwaysFalse() + + def visit_not(self, child_result: BooleanExpression) -> BooleanExpression: + return Not(child=child_result) + + def visit_and(self, left_result: BooleanExpression, right_result: BooleanExpression) -> BooleanExpression: + return And(left=left_result, right=right_result) + + def visit_or(self, left_result: BooleanExpression, right_result: BooleanExpression) -> BooleanExpression: + return Or(left=left_result, right=right_result) + + def visit_unbound_predicate(self, predicate: UnboundPredicate[L]) -> BooleanExpression: + return predicate.bind(self.schema, case_sensitive=self.case_sensitive) + + def visit_bound_predicate(self, predicate: BoundPredicate[L]) -> BooleanExpression: + raise TypeError(f"Found already bound predicate: {predicate}") + + +class BoundBooleanExpressionVisitor(BooleanExpressionVisitor[T], ABC): + @abstractmethod + def visit_in(self, term: BoundTerm[L], literals: Set[L]) -> T: + """Visit a bound In predicate.""" + + @abstractmethod + def visit_not_in(self, term: BoundTerm[L], literals: Set[L]) -> T: + """Visit a bound NotIn predicate.""" + + @abstractmethod + def visit_is_nan(self, term: BoundTerm[L]) -> T: + """Visit a bound IsNan predicate.""" + + @abstractmethod + def visit_not_nan(self, term: BoundTerm[L]) -> T: + """Visit a bound NotNan predicate.""" + + @abstractmethod + def visit_is_null(self, term: BoundTerm[L]) -> T: + """Visit a bound IsNull predicate.""" + + @abstractmethod + def visit_not_null(self, term: BoundTerm[L]) -> T: + """Visit a bound NotNull predicate.""" + + @abstractmethod + def visit_equal(self, term: BoundTerm[L], literal: Literal[L]) -> T: + """Visit a bound Equal predicate.""" + + @abstractmethod + def visit_not_equal(self, term: BoundTerm[L], literal: Literal[L]) -> T: + """Visit a bound NotEqual predicate.""" + + @abstractmethod + def visit_greater_than_or_equal(self, term: BoundTerm[L], literal: Literal[L]) -> T: + """Visit a bound GreaterThanOrEqual predicate.""" + + @abstractmethod + def visit_greater_than(self, term: BoundTerm[L], literal: Literal[L]) -> T: + """Visit a bound GreaterThan predicate.""" + + @abstractmethod + def visit_less_than(self, term: BoundTerm[L], literal: Literal[L]) -> T: + """Visit a bound LessThan predicate.""" + + @abstractmethod + def visit_less_than_or_equal(self, term: BoundTerm[L], literal: Literal[L]) -> T: + """Visit a bound LessThanOrEqual predicate.""" + + @abstractmethod + def visit_true(self) -> T: + """Visit a bound True predicate.""" + + @abstractmethod + def visit_false(self) -> T: + """Visit a bound False predicate.""" + + @abstractmethod + def visit_not(self, child_result: T) -> T: + """Visit a bound Not predicate.""" + + @abstractmethod + def visit_and(self, left_result: T, right_result: T) -> T: + """Visit a bound And predicate.""" + + @abstractmethod + def visit_or(self, left_result: T, right_result: T) -> T: + """Visit a bound Or predicate.""" + + @abstractmethod + def visit_starts_with(self, term: BoundTerm[L], literal: Literal[L]) -> T: + """Visit bound StartsWith predicate.""" + + @abstractmethod + def visit_not_starts_with(self, term: BoundTerm[L], literal: Literal[L]) -> T: + """Visit bound NotStartsWith 
predicate.""" + + def visit_unbound_predicate(self, predicate: UnboundPredicate[L]) -> T: + """Visit an unbound predicate. + + Args: + predicate (UnboundPredicate[L]): An unbound predicate. + Raises: + TypeError: This always raises since an unbound predicate is not expected in a bound boolean expression. + """ + raise TypeError(f"Not a bound predicate: {predicate}") + + def visit_bound_predicate(self, predicate: BoundPredicate[L]) -> T: + """Visit a bound predicate. + + Args: + predicate (BoundPredicate[L]): A bound predicate. + """ + return visit_bound_predicate(predicate, self) + + +@singledispatch +def visit_bound_predicate(expr: BoundPredicate[L], _: BooleanExpressionVisitor[T]) -> T: + raise TypeError(f"Unknown predicate: {expr}") + + +@visit_bound_predicate.register(BoundIn) +def _(expr: BoundIn[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return visitor.visit_in(term=expr.term, literals=expr.value_set) + + +@visit_bound_predicate.register(BoundNotIn) +def _(expr: BoundNotIn[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return visitor.visit_not_in(term=expr.term, literals=expr.value_set) + + +@visit_bound_predicate.register(BoundIsNaN) +def _(expr: BoundIsNaN[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return visitor.visit_is_nan(term=expr.term) + + +@visit_bound_predicate.register(BoundNotNaN) +def _(expr: BoundNotNaN[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return visitor.visit_not_nan(term=expr.term) + + +@visit_bound_predicate.register(BoundIsNull) +def _(expr: BoundIsNull[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return visitor.visit_is_null(term=expr.term) + + +@visit_bound_predicate.register(BoundNotNull) +def _(expr: BoundNotNull[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return visitor.visit_not_null(term=expr.term) + + +@visit_bound_predicate.register(BoundEqualTo) +def _(expr: BoundEqualTo[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return visitor.visit_equal(term=expr.term, literal=expr.literal) + + +@visit_bound_predicate.register(BoundNotEqualTo) +def _(expr: BoundNotEqualTo[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return visitor.visit_not_equal(term=expr.term, literal=expr.literal) + + +@visit_bound_predicate.register(BoundGreaterThanOrEqual) +def _(expr: BoundGreaterThanOrEqual[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + """Visit a bound GreaterThanOrEqual predicate.""" + return visitor.visit_greater_than_or_equal(term=expr.term, literal=expr.literal) + + +@visit_bound_predicate.register(BoundGreaterThan) +def _(expr: BoundGreaterThan[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return visitor.visit_greater_than(term=expr.term, literal=expr.literal) + + +@visit_bound_predicate.register(BoundLessThan) +def _(expr: BoundLessThan[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return visitor.visit_less_than(term=expr.term, literal=expr.literal) + + +@visit_bound_predicate.register(BoundLessThanOrEqual) +def _(expr: BoundLessThanOrEqual[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return visitor.visit_less_than_or_equal(term=expr.term, literal=expr.literal) + + +@visit_bound_predicate.register(BoundStartsWith) +def _(expr: BoundStartsWith[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return visitor.visit_starts_with(term=expr.term, literal=expr.literal) + + +@visit_bound_predicate.register(BoundNotStartsWith) +def _(expr: BoundNotStartsWith[L], visitor: BoundBooleanExpressionVisitor[T]) -> T: + return 
visitor.visit_not_starts_with(term=expr.term, literal=expr.literal) + + +def rewrite_not(expr: BooleanExpression) -> BooleanExpression: + return visit(expr, _RewriteNotVisitor()) + + +class _RewriteNotVisitor(BooleanExpressionVisitor[BooleanExpression]): + """Inverts the negations.""" + + def visit_true(self) -> BooleanExpression: + return AlwaysTrue() + + def visit_false(self) -> BooleanExpression: + return AlwaysFalse() + + def visit_not(self, child_result: BooleanExpression) -> BooleanExpression: + return ~child_result + + def visit_and(self, left_result: BooleanExpression, right_result: BooleanExpression) -> BooleanExpression: + return And(left=left_result, right=right_result) + + def visit_or(self, left_result: BooleanExpression, right_result: BooleanExpression) -> BooleanExpression: + return Or(left=left_result, right=right_result) + + def visit_unbound_predicate(self, predicate: UnboundPredicate[L]) -> BooleanExpression: + return predicate + + def visit_bound_predicate(self, predicate: BoundPredicate[L]) -> BooleanExpression: + return predicate + + +def expression_evaluator(schema: Schema, unbound: BooleanExpression, case_sensitive: bool) -> Callable[[StructProtocol], bool]: + return _ExpressionEvaluator(schema, unbound, case_sensitive).eval + + +class _ExpressionEvaluator(BoundBooleanExpressionVisitor[bool]): + bound: BooleanExpression + struct: StructProtocol + + def __init__(self, schema: Schema, unbound: BooleanExpression, case_sensitive: bool): + self.bound = bind(schema, unbound, case_sensitive) + + def eval(self, struct: StructProtocol) -> bool: + self.struct = struct + return visit(self.bound, self) + + def visit_in(self, term: BoundTerm[L], literals: Set[L]) -> bool: + return term.eval(self.struct) in literals + + def visit_not_in(self, term: BoundTerm[L], literals: Set[L]) -> bool: + return term.eval(self.struct) not in literals + + def visit_is_nan(self, term: BoundTerm[L]) -> bool: + val = term.eval(self.struct) + return val != val + + def visit_not_nan(self, term: BoundTerm[L]) -> bool: + val = term.eval(self.struct) + return val == val + + def visit_is_null(self, term: BoundTerm[L]) -> bool: + return term.eval(self.struct) is None + + def visit_not_null(self, term: BoundTerm[L]) -> bool: + return term.eval(self.struct) is not None + + def visit_equal(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + return term.eval(self.struct) == literal.value + + def visit_not_equal(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + return term.eval(self.struct) != literal.value + + def visit_greater_than_or_equal(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + value = term.eval(self.struct) + return value is not None and value >= literal.value + + def visit_greater_than(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + value = term.eval(self.struct) + return value is not None and value > literal.value + + def visit_less_than(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + value = term.eval(self.struct) + return value is not None and value < literal.value + + def visit_less_than_or_equal(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + value = term.eval(self.struct) + return value is not None and value <= literal.value + + def visit_starts_with(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + eval_res = term.eval(self.struct) + return eval_res is not None and str(eval_res).startswith(str(literal.value)) + + def visit_not_starts_with(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + return not 
self.visit_starts_with(term, literal) + + def visit_true(self) -> bool: + return True + + def visit_false(self) -> bool: + return False + + def visit_not(self, child_result: bool) -> bool: + return not child_result + + def visit_and(self, left_result: bool, right_result: bool) -> bool: + return left_result and right_result + + def visit_or(self, left_result: bool, right_result: bool) -> bool: + return left_result or right_result + + +ROWS_MIGHT_MATCH = True +ROWS_CANNOT_MATCH = False +IN_PREDICATE_LIMIT = 200 + + +def _from_byte_buffer(field_type: IcebergType, val: bytes) -> Any: + if not isinstance(field_type, PrimitiveType): + raise ValueError(f"Expected a PrimitiveType, got: {type(field_type)}") + return from_bytes(field_type, val) + + +class _ManifestEvalVisitor(BoundBooleanExpressionVisitor[bool]): + partition_fields: List[PartitionFieldSummary] + partition_filter: BooleanExpression + + def __init__(self, partition_struct_schema: Schema, partition_filter: BooleanExpression, case_sensitive: bool) -> None: + self.partition_filter = bind(partition_struct_schema, rewrite_not(partition_filter), case_sensitive) + + def eval(self, manifest: ManifestFile) -> bool: + if partitions := manifest.partitions: + self.partition_fields = partitions + return visit(self.partition_filter, self) + + # No partition information + return ROWS_MIGHT_MATCH + + def visit_in(self, term: BoundTerm[L], literals: Set[L]) -> bool: + pos = term.ref().accessor.position + field = self.partition_fields[pos] + + if field.lower_bound is None: + return ROWS_CANNOT_MATCH + + if len(literals) > IN_PREDICATE_LIMIT: + return ROWS_MIGHT_MATCH + + lower = _from_byte_buffer(term.ref().field.field_type, field.lower_bound) + + if all(lower > val for val in literals): + return ROWS_CANNOT_MATCH + + if field.upper_bound is not None: + upper = _from_byte_buffer(term.ref().field.field_type, field.upper_bound) + if all(upper < val for val in literals): + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_not_in(self, term: BoundTerm[L], literals: Set[L]) -> bool: + # because the bounds are not necessarily a min or max value, this cannot be answered using + # them. notIn(col, {X, ...}) with (X, Y) doesn't guarantee that X is a value in col. + return ROWS_MIGHT_MATCH + + def visit_is_nan(self, term: BoundTerm[L]) -> bool: + pos = term.ref().accessor.position + field = self.partition_fields[pos] + + if field.contains_nan is False: + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_not_nan(self, term: BoundTerm[L]) -> bool: + pos = term.ref().accessor.position + field = self.partition_fields[pos] + + if field.contains_nan is True and field.contains_null is False and field.lower_bound is None: + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_is_null(self, term: BoundTerm[L]) -> bool: + pos = term.ref().accessor.position + + if self.partition_fields[pos].contains_null is False: + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_not_null(self, term: BoundTerm[L]) -> bool: + pos = term.ref().accessor.position + + # contains_null encodes whether at least one partition value is null, + # lowerBound is null if all partition values are null + all_null = self.partition_fields[pos].contains_null is True and self.partition_fields[pos].lower_bound is None + + if all_null and type(term.ref().field.field_type) in {DoubleType, FloatType}: + # floating point types may include NaN values, which we check separately. 
+            # If the bounds don't include NaN values, contains_nan needs to be checked as well.
+            all_null = self.partition_fields[pos].contains_nan is False
+
+        if all_null:
+            return ROWS_CANNOT_MATCH
+
+        return ROWS_MIGHT_MATCH
+
+    def visit_equal(self, term: BoundTerm[L], literal: Literal[L]) -> bool:
+        pos = term.ref().accessor.position
+        field = self.partition_fields[pos]
+
+        if field.lower_bound is None or field.upper_bound is None:
+            # values are all null and literal cannot contain null
+            return ROWS_CANNOT_MATCH
+
+        lower = _from_byte_buffer(term.ref().field.field_type, field.lower_bound)
+
+        if lower > literal.value:
+            return ROWS_CANNOT_MATCH
+
+        upper = _from_byte_buffer(term.ref().field.field_type, field.upper_bound)
+
+        if literal.value > upper:
+            return ROWS_CANNOT_MATCH
+
+        return ROWS_MIGHT_MATCH
+
+    def visit_not_equal(self, term: BoundTerm[L], literal: Literal[L]) -> bool:
+        # because the bounds are not necessarily a min or max value, this cannot be answered using
+        # them. notEq(col, X) with (X, Y) doesn't guarantee that X is a value in col.
+        return ROWS_MIGHT_MATCH
+
+    def visit_greater_than_or_equal(self, term: BoundTerm[L], literal: Literal[L]) -> bool:
+        pos = term.ref().accessor.position
+        field = self.partition_fields[pos]
+
+        if field.upper_bound is None:
+            return ROWS_CANNOT_MATCH
+
+        upper = _from_byte_buffer(term.ref().field.field_type, field.upper_bound)
+
+        if literal.value > upper:
+            return ROWS_CANNOT_MATCH
+
+        return ROWS_MIGHT_MATCH
+
+    def visit_greater_than(self, term: BoundTerm[L], literal: Literal[L]) -> bool:
+        pos = term.ref().accessor.position
+        field = self.partition_fields[pos]
+
+        if field.upper_bound is None:
+            return ROWS_CANNOT_MATCH
+
+        upper = _from_byte_buffer(term.ref().field.field_type, field.upper_bound)
+
+        if literal.value >= upper:
+            return ROWS_CANNOT_MATCH
+
+        return ROWS_MIGHT_MATCH
+
+    def visit_less_than(self, term: BoundTerm[L], literal: Literal[L]) -> bool:
+        pos = term.ref().accessor.position
+        field = self.partition_fields[pos]
+
+        if field.lower_bound is None:
+            return ROWS_CANNOT_MATCH
+
+        lower = _from_byte_buffer(term.ref().field.field_type, field.lower_bound)
+
+        if literal.value <= lower:
+            return ROWS_CANNOT_MATCH
+
+        return ROWS_MIGHT_MATCH
+
+    def visit_less_than_or_equal(self, term: BoundTerm[L], literal: Literal[L]) -> bool:
+        pos = term.ref().accessor.position
+        field = self.partition_fields[pos]
+
+        if field.lower_bound is None:
+            return ROWS_CANNOT_MATCH
+
+        lower = _from_byte_buffer(term.ref().field.field_type, field.lower_bound)
+
+        if literal.value < lower:
+            return ROWS_CANNOT_MATCH
+
+        return ROWS_MIGHT_MATCH
+
+    def visit_starts_with(self, term: BoundTerm[L], literal: Literal[L]) -> bool:
+        pos = term.ref().accessor.position
+        field = self.partition_fields[pos]
+        prefix = str(literal.value)
+        len_prefix = len(prefix)
+
+        if field.lower_bound is None:
+            return ROWS_CANNOT_MATCH
+
+        lower = _from_byte_buffer(term.ref().field.field_type, field.lower_bound)
+        # truncate lower bound so that its length is not greater than the length of prefix
+        if lower is not None and lower[:len_prefix] > prefix:
+            return ROWS_CANNOT_MATCH
+
+        if field.upper_bound is None:
+            return ROWS_CANNOT_MATCH
+
+        upper = _from_byte_buffer(term.ref().field.field_type, field.upper_bound)
+        # truncate upper bound so that its length is not greater than the length of prefix
+        if upper is not None and upper[:len_prefix] < prefix:
+            return ROWS_CANNOT_MATCH
+
+        return ROWS_MIGHT_MATCH
+
+    def visit_not_starts_with(self, term: BoundTerm[L], literal:
Literal[L]) -> bool: + pos = term.ref().accessor.position + field = self.partition_fields[pos] + prefix = str(literal.value) + len_prefix = len(prefix) + + if field.contains_null or field.lower_bound is None or field.upper_bound is None: + return ROWS_MIGHT_MATCH + + # not_starts_with will match unless all values must start with the prefix. This happens when + # the lower and upper bounds both start with the prefix. + lower = _from_byte_buffer(term.ref().field.field_type, field.lower_bound) + upper = _from_byte_buffer(term.ref().field.field_type, field.upper_bound) + + if lower is not None and upper is not None: + # if lower is shorter than the prefix then lower doesn't start with the prefix + if len(lower) < len_prefix: + return ROWS_MIGHT_MATCH + + if lower[:len_prefix] == prefix: + # if upper is shorter than the prefix then upper can't start with the prefix + if len(upper) < len_prefix: + return ROWS_MIGHT_MATCH + + if upper[:len_prefix] == prefix: + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_true(self) -> bool: + return ROWS_MIGHT_MATCH + + def visit_false(self) -> bool: + return ROWS_CANNOT_MATCH + + def visit_not(self, child_result: bool) -> bool: + return not child_result + + def visit_and(self, left_result: bool, right_result: bool) -> bool: + return left_result and right_result + + def visit_or(self, left_result: bool, right_result: bool) -> bool: + return left_result or right_result + + +def manifest_evaluator( + partition_spec: PartitionSpec, schema: Schema, partition_filter: BooleanExpression, case_sensitive: bool = True +) -> Callable[[ManifestFile], bool]: + partition_type = partition_spec.partition_type(schema) + partition_schema = Schema(*partition_type.fields) + evaluator = _ManifestEvalVisitor(partition_schema, partition_filter, case_sensitive) + return evaluator.eval + + +class ProjectionEvaluator(BooleanExpressionVisitor[BooleanExpression], ABC): + schema: Schema + spec: PartitionSpec + case_sensitive: bool + + def __init__(self, schema: Schema, spec: PartitionSpec, case_sensitive: bool): + self.schema = schema + self.spec = spec + self.case_sensitive = case_sensitive + + def project(self, expr: BooleanExpression) -> BooleanExpression: + # projections assume that there are no NOT nodes in the expression tree. to ensure that this + # is the case, the expression is rewritten to push all NOT nodes down to the expression + # leaf nodes. + # this is necessary to ensure that the default expression returned when a predicate can't be + # projected is correct. 
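+        # For example (illustrative values, not from the original comment): rewrite_not
+        # turns Not(GreaterThan("x", 5)) into LessThanOrEqual("x", 5), so this visitor
+        # only ever encounters And, Or, and plain predicates.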
+ return visit(bind(self.schema, rewrite_not(expr), self.case_sensitive), self) + + def visit_true(self) -> BooleanExpression: + return AlwaysTrue() + + def visit_false(self) -> BooleanExpression: + return AlwaysFalse() + + def visit_not(self, child_result: BooleanExpression) -> BooleanExpression: + raise ValueError(f"Cannot project not expression, should be rewritten: {child_result}") + + def visit_and(self, left_result: BooleanExpression, right_result: BooleanExpression) -> BooleanExpression: + return And(left_result, right_result) + + def visit_or(self, left_result: BooleanExpression, right_result: BooleanExpression) -> BooleanExpression: + return Or(left_result, right_result) + + def visit_unbound_predicate(self, predicate: UnboundPredicate[L]) -> BooleanExpression: + raise ValueError(f"Cannot project unbound predicate: {predicate}") + + +class InclusiveProjection(ProjectionEvaluator): + def visit_bound_predicate(self, predicate: BoundPredicate[Any]) -> BooleanExpression: + parts = self.spec.fields_by_source_id(predicate.term.ref().field.field_id) + + result: BooleanExpression = AlwaysTrue() + for part in parts: + # consider (d = 2019-01-01) with bucket(7, d) and bucket(5, d) + # projections: b1 = bucket(7, '2019-01-01') = 5, b2 = bucket(5, '2019-01-01') = 0 + # any value where b1 != 5 or any value where b2 != 0 cannot be the '2019-01-01' + # + # similarly, if partitioning by day(ts) and hour(ts), the more restrictive + # projection should be used. ts = 2019-01-01T01:00:00 produces day=2019-01-01 and + # hour=2019-01-01-01. the value will be in 2019-01-01-01 and not in 2019-01-01-02. + incl_projection = part.transform.project(name=part.name, pred=predicate) + if incl_projection is not None: + result = And(result, incl_projection) + + return result + + +def inclusive_projection( + schema: Schema, spec: PartitionSpec, case_sensitive: bool = True +) -> Callable[[BooleanExpression], BooleanExpression]: + return InclusiveProjection(schema, spec, case_sensitive).project + + +class _ColumnNameTranslator(BooleanExpressionVisitor[BooleanExpression]): + """Converts the column names with the ones in the actual file. + + Args: + file_schema (Schema): The schema of the file. + case_sensitive (bool): Whether to consider case when binding a reference to a field in a schema, defaults to True. + + Raises: + TypeError: In the case of an UnboundPredicate. + ValueError: When a column name cannot be found. 
+ """ + + file_schema: Schema + case_sensitive: bool + + def __init__(self, file_schema: Schema, case_sensitive: bool) -> None: + self.file_schema = file_schema + self.case_sensitive = case_sensitive + + def visit_true(self) -> BooleanExpression: + return AlwaysTrue() + + def visit_false(self) -> BooleanExpression: + return AlwaysFalse() + + def visit_not(self, child_result: BooleanExpression) -> BooleanExpression: + return Not(child=child_result) + + def visit_and(self, left_result: BooleanExpression, right_result: BooleanExpression) -> BooleanExpression: + return And(left=left_result, right=right_result) + + def visit_or(self, left_result: BooleanExpression, right_result: BooleanExpression) -> BooleanExpression: + return Or(left=left_result, right=right_result) + + def visit_unbound_predicate(self, predicate: UnboundPredicate[L]) -> BooleanExpression: + raise TypeError(f"Expected Bound Predicate, got: {predicate.term}") + + def visit_bound_predicate(self, predicate: BoundPredicate[L]) -> BooleanExpression: + file_column_name = self.file_schema.find_column_name(predicate.term.ref().field.field_id) + + if not file_column_name: + raise ValueError(f"Not found in file schema: {file_column_name}") + + if isinstance(predicate, BoundUnaryPredicate): + return predicate.as_unbound(file_column_name) + elif isinstance(predicate, BoundLiteralPredicate): + return predicate.as_unbound(file_column_name, predicate.literal) + elif isinstance(predicate, BoundSetPredicate): + return predicate.as_unbound(file_column_name, predicate.literals) + else: + raise ValueError(f"Unsupported predicate: {predicate}") + + +def translate_column_names(expr: BooleanExpression, file_schema: Schema, case_sensitive: bool) -> BooleanExpression: + return visit(expr, _ColumnNameTranslator(file_schema, case_sensitive)) + + +class _ExpressionFieldIDs(BooleanExpressionVisitor[Set[int]]): + """Extracts the field IDs used in the BooleanExpression.""" + + def visit_true(self) -> Set[int]: + return set() + + def visit_false(self) -> Set[int]: + return set() + + def visit_not(self, child_result: Set[int]) -> Set[int]: + return child_result + + def visit_and(self, left_result: Set[int], right_result: Set[int]) -> Set[int]: + return left_result.union(right_result) + + def visit_or(self, left_result: Set[int], right_result: Set[int]) -> Set[int]: + return left_result.union(right_result) + + def visit_unbound_predicate(self, predicate: UnboundPredicate[L]) -> Set[int]: + raise ValueError("Only works on bound records") + + def visit_bound_predicate(self, predicate: BoundPredicate[L]) -> Set[int]: + return {predicate.term.ref().field.field_id} + + +def extract_field_ids(expr: BooleanExpression) -> Set[int]: + return visit(expr, _ExpressionFieldIDs()) + + +class _RewriteToDNF(BooleanExpressionVisitor[Tuple[BooleanExpression, ...]]): + def visit_true(self) -> Tuple[BooleanExpression, ...]: + return (AlwaysTrue(),) + + def visit_false(self) -> Tuple[BooleanExpression, ...]: + return (AlwaysFalse(),) + + def visit_not(self, child_result: Tuple[BooleanExpression, ...]) -> Tuple[BooleanExpression, ...]: + raise ValueError(f"Not expressions are not allowed: {child_result}") + + def visit_and( + self, left_result: Tuple[BooleanExpression, ...], right_result: Tuple[BooleanExpression, ...] 
+    ) -> Tuple[BooleanExpression, ...]:
+        # Distributive law:
+        # (P OR Q) AND (R OR S) = (P AND R) OR (P AND S) OR (Q AND R) OR (Q AND S)
+        # A AND (B OR C) = (A AND B) OR (A AND C)
+        # (A OR B) AND C = (A AND C) OR (B AND C)
+        return tuple(And(le, re) for le in left_result for re in right_result)
+
+    def visit_or(
+        self, left_result: Tuple[BooleanExpression, ...], right_result: Tuple[BooleanExpression, ...]
+    ) -> Tuple[BooleanExpression, ...]:
+        return left_result + right_result
+
+    def visit_unbound_predicate(self, predicate: UnboundPredicate[L]) -> Tuple[BooleanExpression, ...]:
+        return (predicate,)
+
+    def visit_bound_predicate(self, predicate: BoundPredicate[L]) -> Tuple[BooleanExpression, ...]:
+        return (predicate,)
+
+
+def rewrite_to_dnf(expr: BooleanExpression) -> Tuple[BooleanExpression, ...]:
+    # Rewrites an arbitrary boolean expression to disjunctive normal form (DNF):
+    # (A AND NOT(B) AND C) OR (NOT(D) AND E AND F) OR (G)
+    expr_without_not = rewrite_not(expr)
+    return visit(expr_without_not, _RewriteToDNF())
+
+
+class ExpressionToPlainFormat(BoundBooleanExpressionVisitor[List[Tuple[str, str, Any]]]):
+    cast_int_to_date: bool
+
+    def __init__(self, cast_int_to_date: bool = False) -> None:
+        self.cast_int_to_date = cast_int_to_date
+
+    def _cast_if_necessary(self, iceberg_type: IcebergType, literal: Union[L, Set[L]]) -> Union[L, Set[L]]:
+        if self.cast_int_to_date:
+            iceberg_type_class = type(iceberg_type)
+            conversions = {TimestampType: micros_to_timestamp, TimestamptzType: micros_to_timestamptz}
+            if iceberg_type_class in conversions:
+                conversion_function = conversions[iceberg_type_class]
+                if isinstance(literal, set):
+                    return {conversion_function(lit) for lit in literal}  # type: ignore
+                else:
+                    return conversion_function(literal)  # type: ignore
+        return literal
+
+    def visit_in(self, term: BoundTerm[L], literals: Set[L]) -> List[Tuple[str, str, Any]]:
+        field = term.ref().field
+        return [(term.ref().field.name, "in", self._cast_if_necessary(field.field_type, literals))]
+
+    def visit_not_in(self, term: BoundTerm[L], literals: Set[L]) -> List[Tuple[str, str, Any]]:
+        field = term.ref().field
+        return [(field.name, "not in", self._cast_if_necessary(field.field_type, literals))]
+
+    def visit_is_nan(self, term: BoundTerm[L]) -> List[Tuple[str, str, Any]]:
+        return [(term.ref().field.name, "==", float("nan"))]
+
+    def visit_not_nan(self, term: BoundTerm[L]) -> List[Tuple[str, str, Any]]:
+        return [(term.ref().field.name, "!=", float("nan"))]
+
+    def visit_is_null(self, term: BoundTerm[L]) -> List[Tuple[str, str, Any]]:
+        return [(term.ref().field.name, "==", None)]
+
+    def visit_not_null(self, term: BoundTerm[L]) -> List[Tuple[str, str, Any]]:
+        return [(term.ref().field.name, "!=", None)]
+
+    def visit_equal(self, term: BoundTerm[L], literal: Literal[L]) -> List[Tuple[str, str, Any]]:
+        return [(term.ref().field.name, "==", self._cast_if_necessary(term.ref().field.field_type, literal.value))]
+
+    def visit_not_equal(self, term: BoundTerm[L], literal: Literal[L]) -> List[Tuple[str, str, Any]]:
+        return [(term.ref().field.name, "!=", self._cast_if_necessary(term.ref().field.field_type, literal.value))]
+
+    def visit_greater_than_or_equal(self, term: BoundTerm[L], literal: Literal[L]) -> List[Tuple[str, str, Any]]:
+        return [(term.ref().field.name, ">=", self._cast_if_necessary(term.ref().field.field_type, literal.value))]
+
+    def visit_greater_than(self, term: BoundTerm[L], literal: Literal[L]) -> List[Tuple[str, str, Any]]:
+        return [(term.ref().field.name, ">", self._cast_if_necessary(term.ref().field.field_type, literal.value))]
">", self._cast_if_necessary(term.ref().field.field_type, literal.value))] + + def visit_less_than(self, term: BoundTerm[L], literal: Literal[L]) -> List[Tuple[str, str, Any]]: + return [(term.ref().field.name, "<", self._cast_if_necessary(term.ref().field.field_type, literal.value))] + + def visit_less_than_or_equal(self, term: BoundTerm[L], literal: Literal[L]) -> List[Tuple[str, str, Any]]: + return [(term.ref().field.name, "<=", self._cast_if_necessary(term.ref().field.field_type, literal.value))] + + def visit_starts_with(self, term: BoundTerm[L], literal: Literal[L]) -> List[Tuple[str, str, Any]]: + return [] + + def visit_not_starts_with(self, term: BoundTerm[L], literal: Literal[L]) -> List[Tuple[str, str, Any]]: + return [] + + def visit_true(self) -> List[Tuple[str, str, Any]]: + return [] # Not supported + + def visit_false(self) -> List[Tuple[str, str, Any]]: + raise ValueError("Not supported: AlwaysFalse") + + def visit_not(self, child_result: List[Tuple[str, str, Any]]) -> List[Tuple[str, str, Any]]: + raise ValueError(f"Not allowed: {child_result}") + + def visit_and( + self, left_result: List[Tuple[str, str, Any]], right_result: List[Tuple[str, str, Any]] + ) -> List[Tuple[str, str, Any]]: + return left_result + right_result + + def visit_or( + self, left_result: List[Tuple[str, str, Any]], right_result: List[Tuple[str, str, Any]] + ) -> List[Tuple[str, str, Any]]: + raise ValueError(f"Not allowed: {left_result} || {right_result}") + + +def expression_to_plain_format( + expressions: Tuple[BooleanExpression, ...], cast_int_to_datetime: bool = False +) -> List[List[Tuple[str, str, Any]]]: + """Format a Disjunctive Normal Form expression. + + These are the formats that the expression can be fed into: + + - https://arrow.apache.org/docs/python/generated/pyarrow.parquet.read_table.html + - https://docs.dask.org/en/stable/generated/dask.dataframe.read_parquet.html + + Contrary to normal DNF that may contain Not expressions, but here they should have + been rewritten. This can be done using ``rewrite_not(...)``. + + Keep in mind that this is only used for page skipping, and still needs to filter + on a row level. + + Args: + expressions: Expression in Disjunctive Normal Form. + + Returns: + Formatter filter compatible with Dask and PyArrow. + """ + # In the form of expr1 ∨ expr2 ∨ ... ∨ exprN + visitor = ExpressionToPlainFormat(cast_int_to_datetime) + return [visit(expression, visitor) for expression in expressions] + + +class _InclusiveMetricsEvaluator(BoundBooleanExpressionVisitor[bool]): + struct: StructType + expr: BooleanExpression + + value_counts: Dict[int, int] + null_counts: Dict[int, int] + nan_counts: Dict[int, int] + lower_bounds: Dict[int, bytes] + upper_bounds: Dict[int, bytes] + + def __init__( + self, schema: Schema, expr: BooleanExpression, case_sensitive: bool = True, include_empty_files: bool = False + ) -> None: + self.struct = schema.as_struct() + self.include_empty_files = include_empty_files + self.expr = bind(schema, rewrite_not(expr), case_sensitive) + + def eval(self, file: DataFile) -> bool: + """Test whether the file may contain records that match the expression.""" + if not self.include_empty_files and file.record_count == 0: + return ROWS_CANNOT_MATCH + + if file.record_count < 0: + # Older version don't correctly implement record count from avro file and thus + # set record count -1 when importing avro tables to iceberg tables. This should + # be updated once we implemented and set correct record count. 
+ return ROWS_MIGHT_MATCH + + self.value_counts = file.value_counts or EMPTY_DICT + self.null_counts = file.null_value_counts or EMPTY_DICT + self.nan_counts = file.nan_value_counts or EMPTY_DICT + self.lower_bounds = file.lower_bounds or EMPTY_DICT + self.upper_bounds = file.upper_bounds or EMPTY_DICT + + return visit(self.expr, self) + + def _may_contain_null(self, field_id: int) -> bool: + return self.null_counts is None or (field_id in self.null_counts and self.null_counts.get(field_id) is not None) + + def _contains_nulls_only(self, field_id: int) -> bool: + if (value_count := self.value_counts.get(field_id)) and (null_count := self.null_counts.get(field_id)): + return value_count == null_count + return False + + def _contains_nans_only(self, field_id: int) -> bool: + if (nan_count := self.nan_counts.get(field_id)) and (value_count := self.value_counts.get(field_id)): + return nan_count == value_count + return False + + def _is_nan(self, val: Any) -> bool: + try: + return math.isnan(val) + except TypeError: + # In the case of None or other non-numeric types + return False + + def visit_true(self) -> bool: + # all rows match + return ROWS_MIGHT_MATCH + + def visit_false(self) -> bool: + # all rows fail + return ROWS_CANNOT_MATCH + + def visit_not(self, child_result: bool) -> bool: + raise ValueError(f"NOT should be rewritten: {child_result}") + + def visit_and(self, left_result: bool, right_result: bool) -> bool: + return left_result and right_result + + def visit_or(self, left_result: bool, right_result: bool) -> bool: + return left_result or right_result + + def visit_is_null(self, term: BoundTerm[L]) -> bool: + field_id = term.ref().field.field_id + + if self.null_counts.get(field_id) == 0: + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_not_null(self, term: BoundTerm[L]) -> bool: + # no need to check whether the field is required because binding evaluates that case + # if the column has no non-null values, the expression cannot match + field_id = term.ref().field.field_id + + if self._contains_nulls_only(field_id): + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_is_nan(self, term: BoundTerm[L]) -> bool: + field_id = term.ref().field.field_id + + if self.nan_counts.get(field_id) == 0: + return ROWS_CANNOT_MATCH + + # when there's no nanCounts information, but we already know the column only contains null, + # it's guaranteed that there's no NaN value + if self._contains_nulls_only(field_id): + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_not_nan(self, term: BoundTerm[L]) -> bool: + field_id = term.ref().field.field_id + + if self._contains_nans_only(field_id): + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_less_than(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + field = term.ref().field + field_id = field.field_id + + if self._contains_nulls_only(field_id) or self._contains_nans_only(field_id): + return ROWS_CANNOT_MATCH + + if not isinstance(field.field_type, PrimitiveType): + raise ValueError(f"Expected PrimitiveType: {field.field_type}") + + if lower_bound_bytes := self.lower_bounds.get(field_id): + lower_bound = from_bytes(field.field_type, lower_bound_bytes) + + if self._is_nan(lower_bound): + # NaN indicates unreliable bounds. See the InclusiveMetricsEvaluator docs for more. 
+ return ROWS_MIGHT_MATCH + + if lower_bound >= literal.value: + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_less_than_or_equal(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + field = term.ref().field + field_id = field.field_id + + if self._contains_nulls_only(field_id) or self._contains_nans_only(field_id): + return ROWS_CANNOT_MATCH + + if not isinstance(field.field_type, PrimitiveType): + raise ValueError(f"Expected PrimitiveType: {field.field_type}") + + if lower_bound_bytes := self.lower_bounds.get(field_id): + lower_bound = from_bytes(field.field_type, lower_bound_bytes) + if self._is_nan(lower_bound): + # NaN indicates unreliable bounds. See the InclusiveMetricsEvaluator docs for more. + return ROWS_MIGHT_MATCH + + if lower_bound > literal.value: + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_greater_than(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + field = term.ref().field + field_id = field.field_id + + if self._contains_nulls_only(field_id) or self._contains_nans_only(field_id): + return ROWS_CANNOT_MATCH + + if not isinstance(field.field_type, PrimitiveType): + raise ValueError(f"Expected PrimitiveType: {field.field_type}") + + if upper_bound_bytes := self.upper_bounds.get(field_id): + upper_bound = from_bytes(field.field_type, upper_bound_bytes) + if upper_bound <= literal.value: + if self._is_nan(upper_bound): + # NaN indicates unreliable bounds. See the InclusiveMetricsEvaluator docs for more. + return ROWS_MIGHT_MATCH + + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_greater_than_or_equal(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + field = term.ref().field + field_id = field.field_id + + if self._contains_nulls_only(field_id) or self._contains_nans_only(field_id): + return ROWS_CANNOT_MATCH + + if not isinstance(field.field_type, PrimitiveType): + raise ValueError(f"Expected PrimitiveType: {field.field_type}") + + if upper_bound_bytes := self.upper_bounds.get(field_id): + upper_bound = from_bytes(field.field_type, upper_bound_bytes) + if upper_bound < literal.value: + if self._is_nan(upper_bound): + # NaN indicates unreliable bounds. See the InclusiveMetricsEvaluator docs for more. + return ROWS_MIGHT_MATCH + + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_equal(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + field = term.ref().field + field_id = field.field_id + + if self._contains_nulls_only(field_id) or self._contains_nans_only(field_id): + return ROWS_CANNOT_MATCH + + if not isinstance(field.field_type, PrimitiveType): + raise ValueError(f"Expected PrimitiveType: {field.field_type}") + + if lower_bound_bytes := self.lower_bounds.get(field_id): + lower_bound = from_bytes(field.field_type, lower_bound_bytes) + if self._is_nan(lower_bound): + # NaN indicates unreliable bounds. See the InclusiveMetricsEvaluator docs for more. + return ROWS_MIGHT_MATCH + + if lower_bound > literal.value: + return ROWS_CANNOT_MATCH + + if upper_bound_bytes := self.upper_bounds.get(field_id): + upper_bound = from_bytes(field.field_type, upper_bound_bytes) + if self._is_nan(upper_bound): + # NaN indicates unreliable bounds. See the InclusiveMetricsEvaluator docs for more. 
+ return ROWS_MIGHT_MATCH + + if upper_bound < literal.value: + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_not_equal(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + return ROWS_MIGHT_MATCH + + def visit_in(self, term: BoundTerm[L], literals: Set[L]) -> bool: + field = term.ref().field + field_id = field.field_id + + if self._contains_nulls_only(field_id) or self._contains_nans_only(field_id): + return ROWS_CANNOT_MATCH + + if len(literals) > IN_PREDICATE_LIMIT: + # skip evaluating the predicate if the number of values is too big + return ROWS_MIGHT_MATCH + + if not isinstance(field.field_type, PrimitiveType): + raise ValueError(f"Expected PrimitiveType: {field.field_type}") + + if lower_bound_bytes := self.lower_bounds.get(field_id): + lower_bound = from_bytes(field.field_type, lower_bound_bytes) + if self._is_nan(lower_bound): + # NaN indicates unreliable bounds. See the InclusiveMetricsEvaluator docs for more. + return ROWS_MIGHT_MATCH + + literals = {lit for lit in literals if lower_bound <= lit} + if len(literals) == 0: + return ROWS_CANNOT_MATCH + + if upper_bound_bytes := self.upper_bounds.get(field_id): + upper_bound = from_bytes(field.field_type, upper_bound_bytes) + # this is different from Java, here NaN is always larger + if self._is_nan(upper_bound): + return ROWS_MIGHT_MATCH + + literals = {lit for lit in literals if upper_bound >= lit} + if len(literals) == 0: + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_not_in(self, term: BoundTerm[L], literals: Set[L]) -> bool: + # because the bounds are not necessarily a min or max value, this cannot be answered using + # them. notIn(col, {X, ...}) with (X, Y) doesn't guarantee that X is a value in col. + return ROWS_MIGHT_MATCH + + def visit_starts_with(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + field = term.ref().field + field_id: int = field.field_id + + if self._contains_nulls_only(field_id): + return ROWS_CANNOT_MATCH + + if not isinstance(field.field_type, PrimitiveType): + raise ValueError(f"Expected PrimitiveType: {field.field_type}") + + prefix = str(literal.value) + len_prefix = len(prefix) + + if lower_bound_bytes := self.lower_bounds.get(field_id): + lower_bound = str(from_bytes(field.field_type, lower_bound_bytes)) + + # truncate lower bound so that its length is not greater than the length of prefix + if lower_bound and lower_bound[:len_prefix] > prefix: + return ROWS_CANNOT_MATCH + + if upper_bound_bytes := self.upper_bounds.get(field_id): + upper_bound = str(from_bytes(field.field_type, upper_bound_bytes)) + + # truncate upper bound so that its length is not greater than the length of prefix + if upper_bound is not None and upper_bound[:len_prefix] < prefix: + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH + + def visit_not_starts_with(self, term: BoundTerm[L], literal: Literal[L]) -> bool: + field = term.ref().field + field_id: int = field.field_id + + if self._may_contain_null(field_id): + return ROWS_MIGHT_MATCH + + if not isinstance(field.field_type, PrimitiveType): + raise ValueError(f"Expected PrimitiveType: {field.field_type}") + + prefix = str(literal.value) + len_prefix = len(prefix) + + # not_starts_with will match unless all values must start with the prefix. This happens when + # the lower and upper bounds both start with the prefix. 
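+        # Illustrative example: with prefix "ab" and bounds ("abc", "abq"), every value
+        # in range starts with "ab", so no row can match; with bounds ("abc", "ac"),
+        # a value such as "ac" does not start with "ab", so rows might match.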
+ if (lower_bound_bytes := self.lower_bounds.get(field_id)) and (upper_bound_bytes := self.upper_bounds.get(field_id)): + lower_bound = str(from_bytes(field.field_type, lower_bound_bytes)) + upper_bound = str(from_bytes(field.field_type, upper_bound_bytes)) + + # if lower is shorter than the prefix then lower doesn't start with the prefix + if len(lower_bound) < len_prefix: + return ROWS_MIGHT_MATCH + + if lower_bound[:len_prefix] == prefix: + # if upper is shorter than the prefix then upper can't start with the prefix + if len(upper_bound) < len_prefix: + return ROWS_MIGHT_MATCH + + if upper_bound[:len_prefix] == prefix: + return ROWS_CANNOT_MATCH + + return ROWS_MIGHT_MATCH diff --git a/pyiceberg/io/__init__.py b/pyiceberg/io/__init__.py new file mode 100644 index 0000000000..b55a896284 --- /dev/null +++ b/pyiceberg/io/__init__.py @@ -0,0 +1,346 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Base FileIO classes for implementing reading and writing table files. + +The FileIO abstraction includes a subset of full filesystem implementations. Specifically, +Iceberg needs to read or write a file at a given location (as a seekable stream), as well +as check if a file exists. An implementation of the FileIO abstract base class is responsible +for returning an InputFile instance, an OutputFile instance, and deleting a file given +its location. +""" +from __future__ import annotations + +import importlib +import logging +import warnings +from abc import ABC, abstractmethod +from io import SEEK_SET +from types import TracebackType +from typing import ( + Dict, + List, + Optional, + Protocol, + Type, + Union, + runtime_checkable, +) +from urllib.parse import urlparse + +from pyiceberg.typedef import EMPTY_DICT, Properties + +logger = logging.getLogger(__name__) + +S3_ENDPOINT = "s3.endpoint" +S3_ACCESS_KEY_ID = "s3.access-key-id" +S3_SECRET_ACCESS_KEY = "s3.secret-access-key" +S3_SESSION_TOKEN = "s3.session-token" +S3_REGION = "s3.region" +S3_PROXY_URI = "s3.proxy-uri" +HDFS_HOST = "hdfs.host" +HDFS_PORT = "hdfs.port" +HDFS_USER = "hdfs.user" +HDFS_KERB_TICKET = "hdfs.kerberos_ticket" +GCS_TOKEN = "gcs.oauth2.token" +GCS_TOKEN_EXPIRES_AT_MS = "gcs.oauth2.token-expires-at" +GCS_PROJECT_ID = "gcs.project-id" +GCS_ACCESS = "gcs.access" +GCS_CONSISTENCY = "gcs.consistency" +GCS_CACHE_TIMEOUT = "gcs.cache-timeout" +GCS_REQUESTER_PAYS = "gcs.requester-pays" +GCS_SESSION_KWARGS = "gcs.session-kwargs" +GCS_ENDPOINT = "gcs.endpoint" +GCS_DEFAULT_LOCATION = "gcs.default-bucket-location" +GCS_VERSION_AWARE = "gcs.version-aware" + + +@runtime_checkable +class InputStream(Protocol): + """A protocol for the file-like object returned by InputFile.open(...). 
+
+    This outlines the minimally required methods for a seekable input stream returned from an InputFile
+    implementation's `open(...)` method. These methods are a subset of IOBase/RawIOBase.
+    """
+
+    @abstractmethod
+    def read(self, size: int = 0) -> bytes:
+        ...
+
+    @abstractmethod
+    def seek(self, offset: int, whence: int = SEEK_SET) -> int:
+        ...
+
+    @abstractmethod
+    def tell(self) -> int:
+        ...
+
+    @abstractmethod
+    def close(self) -> None:
+        ...
+
+    def __enter__(self) -> InputStream:
+        """Provide setup when opening an InputStream using a 'with' statement."""
+
+    @abstractmethod
+    def __exit__(
+        self, exctype: Optional[Type[BaseException]], excinst: Optional[BaseException], exctb: Optional[TracebackType]
+    ) -> None:
+        """Perform cleanup when exiting the scope of a 'with' statement."""
+
+
+@runtime_checkable
+class OutputStream(Protocol):  # pragma: no cover
+    """A protocol for the file-like object returned by OutputFile.create(...).
+
+    This outlines the minimally required methods for a writable output stream returned from an OutputFile
+    implementation's `create(...)` method. These methods are a subset of IOBase/RawIOBase.
+    """
+
+    @abstractmethod
+    def write(self, b: bytes) -> int:
+        ...
+
+    @abstractmethod
+    def close(self) -> None:
+        ...
+
+    @abstractmethod
+    def __enter__(self) -> OutputStream:
+        """Provide setup when opening an OutputStream using a 'with' statement."""
+
+    @abstractmethod
+    def __exit__(
+        self, exctype: Optional[Type[BaseException]], excinst: Optional[BaseException], exctb: Optional[TracebackType]
+    ) -> None:
+        """Perform cleanup when exiting the scope of a 'with' statement."""
+
+
+class InputFile(ABC):
+    """A base class for InputFile implementations.
+
+    Args:
+        location (str): A URI or a path to a local file.
+
+    Attributes:
+        location (str): The URI or path to a local file for an InputFile instance.
+        exists (bool): Whether the file exists or not.
+    """
+
+    def __init__(self, location: str):
+        self._location = location
+
+    @abstractmethod
+    def __len__(self) -> int:
+        """Return the total length of the file, in bytes."""
+
+    @property
+    def location(self) -> str:
+        """The fully-qualified location of the input file."""
+        return self._location
+
+    @abstractmethod
+    def exists(self) -> bool:
+        """Check whether the location exists.
+
+        Raises:
+            PermissionError: If the file at self.location cannot be accessed due to a permission error.
+        """
+
+    @abstractmethod
+    def open(self, seekable: bool = True) -> InputStream:
+        """Return an object that matches the InputStream protocol.
+
+        Args:
+            seekable: If the stream should support seek, or if it is consumed sequentially.
+
+        Returns:
+            InputStream: An object that matches the InputStream protocol.
+
+        Raises:
+            PermissionError: If the file at self.location cannot be accessed due to a permission error.
+            FileNotFoundError: If the file at self.location does not exist.
+        """
+
+
+class OutputFile(ABC):
+    """A base class for OutputFile implementations.
+
+    Args:
+        location (str): A URI or a path to a local file.
+
+    Attributes:
+        location (str): The URI or path to a local file for an OutputFile instance.
+        exists (bool): Whether the file exists or not.
+ """ + + def __init__(self, location: str): + self._location = location + + @abstractmethod + def __len__(self) -> int: + """Return the total length of the file, in bytes.""" + + @property + def location(self) -> str: + """The fully-qualified location of the output file.""" + return self._location + + @abstractmethod + def exists(self) -> bool: + """Check whether the location exists. + + Raises: + PermissionError: If the file at self.location cannot be accessed due to a permission error. + """ + + @abstractmethod + def to_input_file(self) -> InputFile: + """Return an InputFile for the location of this output file.""" + + @abstractmethod + def create(self, overwrite: bool = False) -> OutputStream: + """Return an object that matches the OutputStream protocol. + + Args: + overwrite (bool): If the file already exists at `self.location` + and `overwrite` is False a FileExistsError should be raised. + + Returns: + OutputStream: An object that matches the OutputStream protocol. + + Raises: + PermissionError: If the file at self.location cannot be accessed due to a permission error. + FileExistsError: If the file at self.location already exists and `overwrite=False`. + """ + + +class FileIO(ABC): + """A base class for FileIO implementations.""" + + properties: Properties + + def __init__(self, properties: Properties = EMPTY_DICT): + self.properties = properties + + @abstractmethod + def new_input(self, location: str) -> InputFile: + """Get an InputFile instance to read bytes from the file at the given location. + + Args: + location (str): A URI or a path to a local file. + """ + + @abstractmethod + def new_output(self, location: str) -> OutputFile: + """Get an OutputFile instance to write bytes to the file at the given location. + + Args: + location (str): A URI or a path to a local file. + """ + + @abstractmethod + def delete(self, location: Union[str, InputFile, OutputFile]) -> None: + """Delete the file at the given path. + + Args: + location (Union[str, InputFile, OutputFile]): A URI or a path to a local file--if an InputFile instance or + an OutputFile instance is provided, the location attribute for that instance is used as the URI to delete. + + Raises: + PermissionError: If the file at location cannot be accessed due to a permission error. + FileNotFoundError: When the file at the provided location does not exist. + """ + + +LOCATION = "location" +WAREHOUSE = "warehouse" + +ARROW_FILE_IO = "pyiceberg.io.pyarrow.PyArrowFileIO" +FSSPEC_FILE_IO = "pyiceberg.io.fsspec.FsspecFileIO" + +# Mappings from the Java FileIO impl to a Python one. The list is ordered by preference. +# If an implementation isn't installed, it will fall back to the next one. 
+SCHEMA_TO_FILE_IO: Dict[str, List[str]] = { + "s3": [ARROW_FILE_IO, FSSPEC_FILE_IO], + "s3a": [ARROW_FILE_IO, FSSPEC_FILE_IO], + "s3n": [ARROW_FILE_IO, FSSPEC_FILE_IO], + "gs": [ARROW_FILE_IO], + "file": [ARROW_FILE_IO], + "hdfs": [ARROW_FILE_IO], + "abfs": [FSSPEC_FILE_IO], + "abfss": [FSSPEC_FILE_IO], +} + + +def _import_file_io(io_impl: str, properties: Properties) -> Optional[FileIO]: + try: + path_parts = io_impl.split(".") + if len(path_parts) < 2: + raise ValueError(f"py-io-impl should be full path (module.CustomFileIO), got: {io_impl}") + module_name, class_name = ".".join(path_parts[:-1]), path_parts[-1] + module = importlib.import_module(module_name) + class_ = getattr(module, class_name) + return class_(properties) + except ModuleNotFoundError: + logger.warning("Could not initialize FileIO: %s", io_impl) + return None + + +PY_IO_IMPL = "py-io-impl" + + +def _infer_file_io_from_scheme(path: str, properties: Properties) -> Optional[FileIO]: + parsed_url = urlparse(path) + if parsed_url.scheme: + if file_ios := SCHEMA_TO_FILE_IO.get(parsed_url.scheme): + for file_io_path in file_ios: + if file_io := _import_file_io(file_io_path, properties): + return file_io + else: + warnings.warn(f"No preferred file implementation for scheme: {parsed_url.scheme}") + return None + + +def load_file_io(properties: Properties = EMPTY_DICT, location: Optional[str] = None) -> FileIO: + # First look for the py-io-impl property to directly load the class + if io_impl := properties.get(PY_IO_IMPL): + if file_io := _import_file_io(io_impl, properties): + logger.info("Loaded FileIO: %s", io_impl) + return file_io + else: + raise ValueError(f"Could not initialize FileIO: {io_impl}") + + # Check the table location + if location: + if file_io := _infer_file_io_from_scheme(location, properties): + return file_io + + # Look at the schema of the warehouse + if warehouse_location := properties.get(WAREHOUSE): + if file_io := _infer_file_io_from_scheme(warehouse_location, properties): + return file_io + + try: + # Default to PyArrow + logger.info("Defaulting to PyArrow FileIO") + from pyiceberg.io.pyarrow import PyArrowFileIO + + return PyArrowFileIO(properties) + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + 'Could not load a FileIO, please consider installing one: pip3 install "pyiceberg[s3fs]", for more options refer to the docs.' + ) from e diff --git a/pyiceberg/io/fsspec.py b/pyiceberg/io/fsspec.py new file mode 100644 index 0000000000..97a01f238a --- /dev/null +++ b/pyiceberg/io/fsspec.py @@ -0,0 +1,334 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+"""FileIO implementation for reading and writing table files that uses fsspec compatible filesystems.""" +import errno +import json +import logging +import os +from functools import lru_cache, partial +from typing import ( + Any, + Callable, + Dict, + Union, +) +from urllib.parse import urlparse + +import requests +from botocore import UNSIGNED +from botocore.awsrequest import AWSRequest +from fsspec import AbstractFileSystem +from fsspec.implementations.local import LocalFileSystem +from requests import HTTPError + +from pyiceberg.catalog import TOKEN +from pyiceberg.exceptions import SignError +from pyiceberg.io import ( + GCS_ACCESS, + GCS_CACHE_TIMEOUT, + GCS_CONSISTENCY, + GCS_DEFAULT_LOCATION, + GCS_ENDPOINT, + GCS_PROJECT_ID, + GCS_REQUESTER_PAYS, + GCS_SESSION_KWARGS, + GCS_TOKEN, + GCS_VERSION_AWARE, + S3_ACCESS_KEY_ID, + S3_ENDPOINT, + S3_PROXY_URI, + S3_REGION, + S3_SECRET_ACCESS_KEY, + S3_SESSION_TOKEN, + FileIO, + InputFile, + InputStream, + OutputFile, + OutputStream, +) +from pyiceberg.typedef import Properties + +logger = logging.getLogger(__name__) + + +def s3v4_rest_signer(properties: Properties, request: AWSRequest, **_: Any) -> AWSRequest: + if TOKEN not in properties: + raise SignError("Signer set, but token is not available") + + signer_url = properties["uri"].rstrip("/") + signer_headers = {"Authorization": f"Bearer {properties[TOKEN]}"} + signer_body = { + "method": request.method, + "region": request.context["client_region"], + "uri": request.url, + "headers": {key: [val] for key, val in request.headers.items()}, + } + + response = requests.post(f"{signer_url}/v1/aws/s3/sign", headers=signer_headers, json=signer_body) + try: + response.raise_for_status() + response_json = response.json() + except HTTPError as e: + raise SignError(f"Failed to sign request {response.status_code}: {signer_body}") from e + + for key, value in response_json["headers"].items(): + request.headers.add_header(key, ", ".join(value)) + + request.url = response_json["uri"] + + return request + + +SIGNERS: Dict[str, Callable[[Properties, AWSRequest], AWSRequest]] = {"S3V4RestSigner": s3v4_rest_signer} + + +def _file(_: Properties) -> LocalFileSystem: + return LocalFileSystem() + + +def _s3(properties: Properties) -> AbstractFileSystem: + from s3fs import S3FileSystem + + client_kwargs = { + "endpoint_url": properties.get(S3_ENDPOINT), + "aws_access_key_id": properties.get(S3_ACCESS_KEY_ID), + "aws_secret_access_key": properties.get(S3_SECRET_ACCESS_KEY), + "aws_session_token": properties.get(S3_SESSION_TOKEN), + "region_name": properties.get(S3_REGION), + } + config_kwargs = {} + register_events: Dict[str, Callable[[Properties], None]] = {} + + if signer := properties.get("s3.signer"): + logger.info("Loading signer %s", signer) + if singer_func := SIGNERS.get(signer): + singer_func_with_properties = partial(singer_func, properties) + register_events["before-sign.s3"] = singer_func_with_properties + + # Disable the AWS Signer + config_kwargs["signature_version"] = UNSIGNED + else: + raise ValueError(f"Signer not available: {signer}") + + if proxy_uri := properties.get(S3_PROXY_URI): + config_kwargs["proxies"] = {"http": proxy_uri, "https": proxy_uri} + + fs = S3FileSystem(client_kwargs=client_kwargs, config_kwargs=config_kwargs) + + for event_name, event_function in register_events.items(): + fs.s3.meta.events.register_last(event_name, event_function, unique_id=1925) + + return fs + + +def _gs(properties: Properties) -> AbstractFileSystem: + # 
https://gcsfs.readthedocs.io/en/latest/api.html#gcsfs.core.GCSFileSystem + from gcsfs import GCSFileSystem + + return GCSFileSystem( + project=properties.get(GCS_PROJECT_ID), + access=properties.get(GCS_ACCESS, "full_control"), + token=properties.get(GCS_TOKEN), + consistency=properties.get(GCS_CONSISTENCY, "none"), + cache_timeout=properties.get(GCS_CACHE_TIMEOUT), + requester_pays=properties.get(GCS_REQUESTER_PAYS, False), + session_kwargs=json.loads(properties.get(GCS_SESSION_KWARGS, "{}")), + endpoint_url=properties.get(GCS_ENDPOINT), + default_location=properties.get(GCS_DEFAULT_LOCATION), + version_aware=properties.get(GCS_VERSION_AWARE, "false").lower() == "true", + ) + + +def _adlfs(properties: Properties) -> AbstractFileSystem: + from adlfs import AzureBlobFileSystem + + return AzureBlobFileSystem( + connection_string=properties.get("adlfs.connection-string"), + account_name=properties.get("adlfs.account-name"), + account_key=properties.get("adlfs.account-key"), + sas_token=properties.get("adlfs.sas-token"), + tenant_id=properties.get("adlfs.tenant-id"), + client_id=properties.get("adlfs.client-id"), + client_secret=properties.get("adlfs.client-secret"), + ) + + +SCHEME_TO_FS = { + "file": _file, + "s3": _s3, + "s3a": _s3, + "s3n": _s3, + "abfs": _adlfs, + "abfss": _adlfs, + "gs": _gs, + "gcs": _gs, +} + + +class FsspecInputFile(InputFile): + """An input file implementation for the FsspecFileIO. + + Args: + location (str): A URI to a file location. + fs (AbstractFileSystem): An fsspec filesystem instance. + """ + + def __init__(self, location: str, fs: AbstractFileSystem): + self._fs = fs + super().__init__(location=location) + + def __len__(self) -> int: + """Return the total length of the file, in bytes.""" + object_info = self._fs.info(self.location) + if size := object_info.get("Size"): + return size + elif size := object_info.get("size"): + return size + raise RuntimeError(f"Cannot retrieve object info: {self.location}") + + def exists(self) -> bool: + """Check whether the location exists.""" + return self._fs.lexists(self.location) + + def open(self, seekable: bool = True) -> InputStream: + """Create an input stream for reading the contents of the file. + + Args: + seekable: If the stream should support seek, or if it is consumed sequential. + + Returns: + OpenFile: An fsspec compliant file-like object. + + Raises: + FileNotFoundError: If the file does not exist. + """ + try: + return self._fs.open(self.location, "rb") + except FileNotFoundError as e: + # To have a consistent error handling experience, make sure exception contains missing file location. + raise e if e.filename else FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), self.location) from e + + +class FsspecOutputFile(OutputFile): + """An output file implementation for the FsspecFileIO. + + Args: + location (str): A URI to a file location. + fs (AbstractFileSystem): An fsspec filesystem instance. 
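+
+    Examples:
+        A minimal sketch; instances are normally obtained via FsspecFileIO.new_output,
+        and the path below is an illustrative assumption:
+
+        >>> # from fsspec.implementations.local import LocalFileSystem
+        >>> # output_file = FsspecOutputFile("file:///tmp/example.txt", LocalFileSystem())
+        >>> # output_file.create(overwrite=True).write(b"example bytes")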
+ """ + + def __init__(self, location: str, fs: AbstractFileSystem): + self._fs = fs + super().__init__(location=location) + + def __len__(self) -> int: + """Return the total length of the file, in bytes.""" + object_info = self._fs.info(self.location) + if size := object_info.get("Size"): + return size + elif size := object_info.get("size"): + return size + raise RuntimeError(f"Cannot retrieve object info: {self.location}") + + def exists(self) -> bool: + """Check whether the location exists.""" + return self._fs.lexists(self.location) + + def create(self, overwrite: bool = False) -> OutputStream: + """Create an output stream for reading the contents of the file. + + Args: + overwrite (bool): Whether to overwrite the file if it already exists. + + Returns: + OpenFile: An fsspec compliant file-like object. + + Raises: + FileExistsError: If the file already exists at the location and overwrite is set to False. + + Note: + If overwrite is set to False, a check is first performed to verify that the file does not exist. + This is not thread-safe and a possibility does exist that the file can be created by a concurrent + process after the existence check yet before the output stream is created. In such a case, the default + behavior will truncate the contents of the existing file when opening the output stream. + """ + if not overwrite and self.exists(): + raise FileExistsError(f"Cannot create file, file already exists: {self.location}") + return self._fs.open(self.location, "wb") + + def to_input_file(self) -> FsspecInputFile: + """Return a new FsspecInputFile for the location at `self.location`.""" + return FsspecInputFile(location=self.location, fs=self._fs) + + +class FsspecFileIO(FileIO): + """A FileIO implementation that uses fsspec.""" + + def __init__(self, properties: Properties): + self._scheme_to_fs = {} + self._scheme_to_fs.update(SCHEME_TO_FS) + self.get_fs: Callable[[str], AbstractFileSystem] = lru_cache(self._get_fs) + super().__init__(properties=properties) + + def new_input(self, location: str) -> FsspecInputFile: + """Get an FsspecInputFile instance to read bytes from the file at the given location. + + Args: + location (str): A URI or a path to a local file. + + Returns: + FsspecInputFile: An FsspecInputFile instance for the given location. + """ + uri = urlparse(location) + fs = self.get_fs(uri.scheme) + return FsspecInputFile(location=location, fs=fs) + + def new_output(self, location: str) -> FsspecOutputFile: + """Get an FsspecOutputFile instance to write bytes to the file at the given location. + + Args: + location (str): A URI or a path to a local file. + + Returns: + FsspecOutputFile: An FsspecOutputFile instance for the given location. + """ + uri = urlparse(location) + fs = self.get_fs(uri.scheme) + return FsspecOutputFile(location=location, fs=fs) + + def delete(self, location: Union[str, InputFile, OutputFile]) -> None: + """Delete the file at the given location. + + Args: + location (Union[str, InputFile, OutputFile]): The URI to the file--if an InputFile instance or an + OutputFile instance is provided, the location attribute for that instance is used as the location + to delete. 
+ """ + if isinstance(location, (InputFile, OutputFile)): + str_location = location.location # Use InputFile or OutputFile location + else: + str_location = location + + uri = urlparse(str_location) + fs = self.get_fs(uri.scheme) + fs.rm(str_location) + + def _get_fs(self, scheme: str) -> AbstractFileSystem: + """Get a filesystem for a specific scheme.""" + if scheme not in self._scheme_to_fs: + raise ValueError(f"No registered filesystem for scheme: {scheme}") + return self._scheme_to_fs[scheme](self.properties) diff --git a/pyiceberg/io/pyarrow.py b/pyiceberg/io/pyarrow.py new file mode 100644 index 0000000000..f2d60e7534 --- /dev/null +++ b/pyiceberg/io/pyarrow.py @@ -0,0 +1,1528 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=redefined-outer-name,arguments-renamed,fixme +"""FileIO implementation for reading and writing table files that uses pyarrow.fs. + +This file contains a FileIO implementation that relies on the filesystem interface provided +by PyArrow. It relies on PyArrow's `from_uri` method that infers the correct filesystem +type to use. Theoretically, this allows the supported storage types to grow naturally +with the pyarrow library. 
+""" +from __future__ import annotations + +import concurrent.futures +import logging +import os +import re +from abc import ABC, abstractmethod +from concurrent.futures import Future +from dataclasses import dataclass +from enum import Enum +from functools import lru_cache, singledispatch +from itertools import chain +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Generic, + Iterable, + Iterator, + List, + Optional, + Set, + Tuple, + TypeVar, + Union, + cast, +) +from urllib.parse import urlparse + +import numpy as np +import pyarrow as pa +import pyarrow.compute as pc +import pyarrow.dataset as ds +import pyarrow.lib +import pyarrow.parquet as pq +from pyarrow import ChunkedArray +from pyarrow.fs import ( + FileInfo, + FileSystem, + FileType, + FSSpecHandler, +) +from sortedcontainers import SortedList + +from pyiceberg.avro.resolver import ResolveError +from pyiceberg.conversions import to_bytes +from pyiceberg.expressions import ( + AlwaysTrue, + BooleanExpression, + BoundTerm, + Literal, +) +from pyiceberg.expressions.visitors import ( + BoundBooleanExpressionVisitor, + bind, + extract_field_ids, + translate_column_names, +) +from pyiceberg.expressions.visitors import visit as boolean_expression_visit +from pyiceberg.io import ( + GCS_DEFAULT_LOCATION, + GCS_ENDPOINT, + GCS_TOKEN, + GCS_TOKEN_EXPIRES_AT_MS, + HDFS_HOST, + HDFS_KERB_TICKET, + HDFS_PORT, + HDFS_USER, + S3_ACCESS_KEY_ID, + S3_ENDPOINT, + S3_PROXY_URI, + S3_REGION, + S3_SECRET_ACCESS_KEY, + S3_SESSION_TOKEN, + FileIO, + InputFile, + InputStream, + OutputFile, + OutputStream, +) +from pyiceberg.manifest import DataFile, FileFormat +from pyiceberg.schema import ( + PartnerAccessor, + PreOrderSchemaVisitor, + Schema, + SchemaVisitorPerPrimitiveType, + SchemaWithPartnerVisitor, + pre_order_visit, + promote, + prune_columns, + visit, + visit_with_partner, +) +from pyiceberg.transforms import TruncateTransform +from pyiceberg.typedef import EMPTY_DICT, Properties +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IcebergType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + PrimitiveType, + StringType, + StructType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) +from pyiceberg.utils.concurrent import ExecutorFactory +from pyiceberg.utils.datetime import millis_to_datetime +from pyiceberg.utils.singleton import Singleton +from pyiceberg.utils.truncate import truncate_upper_bound_binary_string, truncate_upper_bound_text_string + +if TYPE_CHECKING: + from pyiceberg.table import FileScanTask, Table + +logger = logging.getLogger(__name__) + +ONE_MEGABYTE = 1024 * 1024 +BUFFER_SIZE = "buffer-size" +ICEBERG_SCHEMA = b"iceberg.schema" +FIELD_ID = "field_id" +DOC = "doc" +PYARROW_FIELD_ID_KEYS = [b"PARQUET:field_id", b"field_id"] +PYARROW_FIELD_DOC_KEYS = [b"PARQUET:field_doc", b"field_doc", b"doc"] + +T = TypeVar("T") + + +class PyArrowFile(InputFile, OutputFile): + """A combined InputFile and OutputFile implementation that uses a pyarrow filesystem to generate pyarrow.lib.NativeFile instances. + + Args: + location (str): A URI or a path to a local file. + + Attributes: + location(str): The URI or path to a local file for a PyArrowFile instance. 
+
+    Examples:
+        >>> from pyiceberg.io.pyarrow import PyArrowFile
+        >>> # input_file = PyArrowFile("s3://foo/bar.txt")
+        >>> # Read the contents of the PyArrowFile instance
+        >>> # Make sure that you have permissions to read/write
+        >>> # file_content = input_file.open().read()
+
+        >>> # output_file = PyArrowFile("s3://baz/qux.txt")
+        >>> # Write bytes to a file
+        >>> # Make sure that you have permissions to read/write
+        >>> # output_file.create().write(b'foobytes')
+    """
+
+    _filesystem: FileSystem
+    _path: str
+    _buffer_size: int
+
+    def __init__(self, location: str, path: str, fs: FileSystem, buffer_size: int = ONE_MEGABYTE):
+        self._filesystem = fs
+        self._path = path
+        self._buffer_size = buffer_size
+        super().__init__(location=location)
+
+    def _file_info(self) -> FileInfo:
+        """Retrieve a pyarrow.fs.FileInfo object for the location.
+
+        Raises:
+            PermissionError: If the file at self.location cannot be accessed due to a permission error such as
+                an AWS error code 15.
+        """
+        try:
+            file_info = self._filesystem.get_file_info(self._path)
+        except OSError as e:
+            if e.errno == 13 or "AWS Error [code 15]" in str(e):
+                raise PermissionError(f"Cannot get file info, access denied: {self.location}") from e
+            raise  # pragma: no cover - If some other kind of OSError, raise the raw error
+
+        if file_info.type == FileType.NotFound:
+            raise FileNotFoundError(f"Cannot get file info, file not found: {self.location}")
+        return file_info
+
+    def __len__(self) -> int:
+        """Return the total length of the file, in bytes."""
+        file_info = self._file_info()
+        return file_info.size
+
+    def exists(self) -> bool:
+        """Check whether the location exists."""
+        try:
+            self._file_info()  # raises FileNotFoundError if it does not exist
+            return True
+        except FileNotFoundError:
+            return False
+
+    def open(self, seekable: bool = True) -> InputStream:
+        """Open the location using a PyArrow FileSystem inferred from the location.
+
+        Args:
+            seekable: If the stream should support seek, or if it is consumed sequentially.
+
+        Returns:
+            pyarrow.lib.NativeFile: A NativeFile instance for the file located at `self.location`.
+
+        Raises:
+            FileNotFoundError: If the file at self.location does not exist.
+            PermissionError: If the file at self.location cannot be accessed due to a permission error such as
+                an AWS error code 15.
+        """
+        try:
+            if seekable:
+                input_file = self._filesystem.open_input_file(self._path)
+            else:
+                input_file = self._filesystem.open_input_stream(self._path, buffer_size=self._buffer_size)
+        except FileNotFoundError:
+            raise
+        except PermissionError:
+            raise
+        except OSError as e:
+            if e.errno == 2 or "Path does not exist" in str(e):
+                raise FileNotFoundError(f"Cannot open file, does not exist: {self.location}") from e
+            elif e.errno == 13 or "AWS Error [code 15]" in str(e):
+                raise PermissionError(f"Cannot open file, access denied: {self.location}") from e
+            raise  # pragma: no cover - If some other kind of OSError, raise the raw error
+        return input_file
+
+    def create(self, overwrite: bool = False) -> OutputStream:
+        """Create a writable pyarrow.lib.NativeFile for this PyArrowFile's location.
+
+        Args:
+            overwrite (bool): Whether to overwrite the file if it already exists.
+
+        Returns:
+            pyarrow.lib.NativeFile: A NativeFile instance for the file located at self.location.
+
+        Raises:
+            FileExistsError: If the file already exists at `self.location` and `overwrite` is False.
+
+        Note:
+            This retrieves a pyarrow NativeFile by opening an output stream.
If overwrite is set to False, + a check is first performed to verify that the file does not exist. This is not thread-safe and + a possibility does exist that the file can be created by a concurrent process after the existence + check yet before the output stream is created. In such a case, the default pyarrow behavior will + truncate the contents of the existing file when opening the output stream. + """ + try: + if not overwrite and self.exists() is True: + raise FileExistsError(f"Cannot create file, already exists: {self.location}") + output_file = self._filesystem.open_output_stream(self._path, buffer_size=self._buffer_size) + except PermissionError: + raise + except OSError as e: + if e.errno == 13 or "AWS Error [code 15]" in str(e): + raise PermissionError(f"Cannot create file, access denied: {self.location}") from e + raise # pragma: no cover - If some other kind of OSError, raise the raw error + return output_file + + def to_input_file(self) -> PyArrowFile: + """Return a new PyArrowFile for the location of an existing PyArrowFile instance. + + This method is included to abide by the OutputFile abstract base class. Since this implementation uses a single + PyArrowFile class (as opposed to separate InputFile and OutputFile implementations), this method effectively returns + a copy of the same instance. + """ + return self + + +class PyArrowFileIO(FileIO): + fs_by_scheme: Callable[[str, Optional[str]], FileSystem] + + def __init__(self, properties: Properties = EMPTY_DICT): + self.fs_by_scheme: Callable[[str, Optional[str]], FileSystem] = lru_cache(self._initialize_fs) + super().__init__(properties=properties) + + @staticmethod + def parse_location(location: str) -> Tuple[str, str, str]: + """Return the path without the scheme.""" + uri = urlparse(location) + if not uri.scheme: + return "file", uri.netloc, os.path.abspath(location) + elif uri.scheme == "hdfs": + return uri.scheme, uri.netloc, location + else: + return uri.scheme, uri.netloc, f"{uri.netloc}{uri.path}" + + def _initialize_fs(self, scheme: str, netloc: Optional[str] = None) -> FileSystem: + if scheme in {"s3", "s3a", "s3n"}: + from pyarrow.fs import S3FileSystem + + client_kwargs = { + "endpoint_override": self.properties.get(S3_ENDPOINT), + "access_key": self.properties.get(S3_ACCESS_KEY_ID), + "secret_key": self.properties.get(S3_SECRET_ACCESS_KEY), + "session_token": self.properties.get(S3_SESSION_TOKEN), + "region": self.properties.get(S3_REGION), + } + + if proxy_uri := self.properties.get(S3_PROXY_URI): + client_kwargs["proxy_options"] = proxy_uri + + return S3FileSystem(**client_kwargs) + elif scheme == "hdfs": + from pyarrow.fs import HadoopFileSystem + + hdfs_kwargs: Dict[str, Any] = {} + if netloc: + return HadoopFileSystem.from_uri(f"hdfs://{netloc}") + if host := self.properties.get(HDFS_HOST): + hdfs_kwargs["host"] = host + if port := self.properties.get(HDFS_PORT): + # port should be an integer type + hdfs_kwargs["port"] = int(port) + if user := self.properties.get(HDFS_USER): + hdfs_kwargs["user"] = user + if kerb_ticket := self.properties.get(HDFS_KERB_TICKET): + hdfs_kwargs["kerb_ticket"] = kerb_ticket + + return HadoopFileSystem(**hdfs_kwargs) + elif scheme in {"gs", "gcs"}: + from pyarrow.fs import GcsFileSystem + + gcs_kwargs: Dict[str, Any] = {} + if access_token := self.properties.get(GCS_TOKEN): + gcs_kwargs["access_token"] = access_token + if expiration := self.properties.get(GCS_TOKEN_EXPIRES_AT_MS): + gcs_kwargs["credential_token_expiration"] = millis_to_datetime(int(expiration)) + if 
bucket_location := self.properties.get(GCS_DEFAULT_LOCATION): + gcs_kwargs["default_bucket_location"] = bucket_location + if endpoint := self.properties.get(GCS_ENDPOINT): + url_parts = urlparse(endpoint) + gcs_kwargs["scheme"] = url_parts.scheme + gcs_kwargs["endpoint_override"] = url_parts.netloc + + return GcsFileSystem(**gcs_kwargs) + elif scheme == "file": + from pyarrow.fs import LocalFileSystem + + return LocalFileSystem() + else: + raise ValueError(f"Unrecognized filesystem type in URI: {scheme}") + + def new_input(self, location: str) -> PyArrowFile: + """Get a PyArrowFile instance to read bytes from the file at the given location. + + Args: + location (str): A URI or a path to a local file. + + Returns: + PyArrowFile: A PyArrowFile instance for the given location. + """ + scheme, netloc, path = self.parse_location(location) + return PyArrowFile( + fs=self.fs_by_scheme(scheme, netloc), + location=location, + path=path, + buffer_size=int(self.properties.get(BUFFER_SIZE, ONE_MEGABYTE)), + ) + + def new_output(self, location: str) -> PyArrowFile: + """Get a PyArrowFile instance to write bytes to the file at the given location. + + Args: + location (str): A URI or a path to a local file. + + Returns: + PyArrowFile: A PyArrowFile instance for the given location. + """ + scheme, netloc, path = self.parse_location(location) + return PyArrowFile( + fs=self.fs_by_scheme(scheme, netloc), + location=location, + path=path, + buffer_size=int(self.properties.get(BUFFER_SIZE, ONE_MEGABYTE)), + ) + + def delete(self, location: Union[str, InputFile, OutputFile]) -> None: + """Delete the file at the given location. + + Args: + location (Union[str, InputFile, OutputFile]): The URI to the file--if an InputFile instance or an OutputFile instance is provided, + the location attribute for that instance is used as the location to delete. + + Raises: + FileNotFoundError: When the file at the provided location does not exist. + PermissionError: If the file at the provided location cannot be accessed due to a permission error such as + an AWS error code 15. 
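+
+        Examples:
+            A minimal sketch; the path is an illustrative assumption, and an InputFile
+            or OutputFile can be passed in place of the string location:
+
+            >>> # io = PyArrowFileIO()
+            >>> # io.delete(io.new_input("file:///tmp/example.parquet"))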
+ """ + str_location = location.location if isinstance(location, (InputFile, OutputFile)) else location + scheme, netloc, path = self.parse_location(str_location) + fs = self.fs_by_scheme(scheme, netloc) + + try: + fs.delete_file(path) + except FileNotFoundError: + raise + except PermissionError: + raise + except OSError as e: + if e.errno == 2 or "Path does not exist" in str(e): + raise FileNotFoundError(f"Cannot delete file, does not exist: {location}") from e + elif e.errno == 13 or "AWS Error [code 15]" in str(e): + raise PermissionError(f"Cannot delete file, access denied: {location}") from e + raise # pragma: no cover - If some other kind of OSError, raise the raw error + + +def schema_to_pyarrow(schema: Union[Schema, IcebergType]) -> pa.schema: + return visit(schema, _ConvertToArrowSchema()) + + +class _ConvertToArrowSchema(SchemaVisitorPerPrimitiveType[pa.DataType], Singleton): + def schema(self, _: Schema, struct_result: pa.StructType) -> pa.schema: + return pa.schema(list(struct_result)) + + def struct(self, _: StructType, field_results: List[pa.DataType]) -> pa.DataType: + return pa.struct(field_results) + + def field(self, field: NestedField, field_result: pa.DataType) -> pa.Field: + return pa.field( + name=field.name, + type=field_result, + nullable=field.optional, + metadata={DOC: field.doc, FIELD_ID: str(field.field_id)} if field.doc else {FIELD_ID: str(field.field_id)}, + ) + + def list(self, list_type: ListType, element_result: pa.DataType) -> pa.DataType: + element_field = self.field(list_type.element_field, element_result) + return pa.list_(value_type=element_field) + + def map(self, map_type: MapType, key_result: pa.DataType, value_result: pa.DataType) -> pa.DataType: + key_field = self.field(map_type.key_field, key_result) + value_field = self.field(map_type.value_field, value_result) + return pa.map_(key_type=key_field, item_type=value_field) + + def visit_fixed(self, fixed_type: FixedType) -> pa.DataType: + return pa.binary(len(fixed_type)) + + def visit_decimal(self, decimal_type: DecimalType) -> pa.DataType: + return pa.decimal128(decimal_type.precision, decimal_type.scale) + + def visit_boolean(self, _: BooleanType) -> pa.DataType: + return pa.bool_() + + def visit_integer(self, _: IntegerType) -> pa.DataType: + return pa.int32() + + def visit_long(self, _: LongType) -> pa.DataType: + return pa.int64() + + def visit_float(self, _: FloatType) -> pa.DataType: + # 32-bit IEEE 754 floating point + return pa.float32() + + def visit_double(self, _: DoubleType) -> pa.DataType: + # 64-bit IEEE 754 floating point + return pa.float64() + + def visit_date(self, _: DateType) -> pa.DataType: + # Date encoded as an int + return pa.date32() + + def visit_time(self, _: TimeType) -> pa.DataType: + return pa.time64("us") + + def visit_timestamp(self, _: TimestampType) -> pa.DataType: + return pa.timestamp(unit="us") + + def visit_timestamptz(self, _: TimestamptzType) -> pa.DataType: + return pa.timestamp(unit="us", tz="UTC") + + def visit_string(self, _: StringType) -> pa.DataType: + return pa.string() + + def visit_uuid(self, _: UUIDType) -> pa.DataType: + return pa.binary(16) + + def visit_binary(self, _: BinaryType) -> pa.DataType: + return pa.binary() + + +def _convert_scalar(value: Any, iceberg_type: IcebergType) -> pa.scalar: + if not isinstance(iceberg_type, PrimitiveType): + raise ValueError(f"Expected primitive type, got: {iceberg_type}") + return pa.scalar(value=value, type=schema_to_pyarrow(iceberg_type)) + + +class 
_ConvertToArrowExpression(BoundBooleanExpressionVisitor[pc.Expression]): + def visit_in(self, term: BoundTerm[pc.Expression], literals: Set[Any]) -> pc.Expression: + pyarrow_literals = pa.array(literals, type=schema_to_pyarrow(term.ref().field.field_type)) + return pc.field(term.ref().field.name).isin(pyarrow_literals) + + def visit_not_in(self, term: BoundTerm[pc.Expression], literals: Set[Any]) -> pc.Expression: + pyarrow_literals = pa.array(literals, type=schema_to_pyarrow(term.ref().field.field_type)) + return ~pc.field(term.ref().field.name).isin(pyarrow_literals) + + def visit_is_nan(self, term: BoundTerm[Any]) -> pc.Expression: + ref = pc.field(term.ref().field.name) + return pc.is_nan(ref) + + def visit_not_nan(self, term: BoundTerm[Any]) -> pc.Expression: + ref = pc.field(term.ref().field.name) + return ~pc.is_nan(ref) + + def visit_is_null(self, term: BoundTerm[Any]) -> pc.Expression: + return pc.field(term.ref().field.name).is_null(nan_is_null=False) + + def visit_not_null(self, term: BoundTerm[Any]) -> pc.Expression: + return pc.field(term.ref().field.name).is_valid() + + def visit_equal(self, term: BoundTerm[Any], literal: Literal[Any]) -> pc.Expression: + return pc.field(term.ref().field.name) == _convert_scalar(literal.value, term.ref().field.field_type) + + def visit_not_equal(self, term: BoundTerm[Any], literal: Literal[Any]) -> pc.Expression: + return pc.field(term.ref().field.name) != _convert_scalar(literal.value, term.ref().field.field_type) + + def visit_greater_than_or_equal(self, term: BoundTerm[Any], literal: Literal[Any]) -> pc.Expression: + return pc.field(term.ref().field.name) >= _convert_scalar(literal.value, term.ref().field.field_type) + + def visit_greater_than(self, term: BoundTerm[Any], literal: Literal[Any]) -> pc.Expression: + return pc.field(term.ref().field.name) > _convert_scalar(literal.value, term.ref().field.field_type) + + def visit_less_than(self, term: BoundTerm[Any], literal: Literal[Any]) -> pc.Expression: + return pc.field(term.ref().field.name) < _convert_scalar(literal.value, term.ref().field.field_type) + + def visit_less_than_or_equal(self, term: BoundTerm[Any], literal: Literal[Any]) -> pc.Expression: + return pc.field(term.ref().field.name) <= _convert_scalar(literal.value, term.ref().field.field_type) + + def visit_starts_with(self, term: BoundTerm[Any], literal: Literal[Any]) -> pc.Expression: + return pc.starts_with(pc.field(term.ref().field.name), literal.value) + + def visit_not_starts_with(self, term: BoundTerm[Any], literal: Literal[Any]) -> pc.Expression: + return ~pc.starts_with(pc.field(term.ref().field.name), literal.value) + + def visit_true(self) -> pc.Expression: + return pc.scalar(True) + + def visit_false(self) -> pc.Expression: + return pc.scalar(False) + + def visit_not(self, child_result: pc.Expression) -> pc.Expression: + return ~child_result + + def visit_and(self, left_result: pc.Expression, right_result: pc.Expression) -> pc.Expression: + return left_result & right_result + + def visit_or(self, left_result: pc.Expression, right_result: pc.Expression) -> pc.Expression: + return left_result | right_result + + +def expression_to_pyarrow(expr: BooleanExpression) -> pc.Expression: + return boolean_expression_visit(expr, _ConvertToArrowExpression()) + + +@lru_cache +def _get_file_format(file_format: FileFormat, **kwargs: Dict[str, Any]) -> ds.FileFormat: + if file_format == FileFormat.PARQUET: + return ds.ParquetFileFormat(**kwargs) + else: + raise ValueError(f"Unsupported file format: {file_format}") + + +def 
_construct_fragment(fs: FileSystem, data_file: DataFile, file_format_kwargs: Dict[str, Any] = EMPTY_DICT) -> ds.Fragment:
+    _, _, path = PyArrowFileIO.parse_location(data_file.file_path)
+    return _get_file_format(data_file.file_format, **file_format_kwargs).make_fragment(path, fs)
+
+
+def _read_deletes(fs: FileSystem, data_file: DataFile) -> Dict[str, pa.ChunkedArray]:
+    delete_fragment = _construct_fragment(
+        fs, data_file, file_format_kwargs={"dictionary_columns": ("file_path",), "pre_buffer": True, "buffer_size": ONE_MEGABYTE}
+    )
+    table = ds.Scanner.from_fragment(fragment=delete_fragment).to_table()
+    table = table.unify_dictionaries()
+    return {
+        file.as_py(): table.filter(pc.field("file_path") == file).column("pos")
+        for file in table.column("file_path").chunks[0].dictionary
+    }
+
+
+def _combine_positional_deletes(positional_deletes: List[pa.ChunkedArray], rows: int) -> pa.Array:
+    if len(positional_deletes) == 1:
+        all_chunks = positional_deletes[0]
+    else:
+        all_chunks = pa.chunked_array(chain(*[arr.chunks for arr in positional_deletes]))
+    return np.setdiff1d(np.arange(rows), all_chunks, assume_unique=False)
+
+
+def pyarrow_to_schema(schema: pa.Schema) -> Schema:
+    visitor = _ConvertToIceberg()
+    return visit_pyarrow(schema, visitor)
+
+
+@singledispatch
+def visit_pyarrow(obj: Union[pa.DataType, pa.Schema], visitor: PyArrowSchemaVisitor[T]) -> T:
+    """Apply a pyarrow schema visitor to any point within a schema.
+
+    The function traverses the schema in post-order fashion.
+
+    Args:
+        obj (Union[pa.DataType, pa.Schema]): An instance of a pyarrow Schema or DataType.
+        visitor (PyArrowSchemaVisitor[T]): An instance of an implementation of the generic PyArrowSchemaVisitor base class.
+
+    Raises:
+        NotImplementedError: If attempting to visit an unrecognized object type.
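+
+    Examples:
+        A minimal sketch mirroring what pyarrow_to_schema above does; the input
+        schema is assumed to carry Iceberg field-id metadata on every field:
+
+        >>> # iceberg_schema = visit_pyarrow(pa.schema([...]), _ConvertToIceberg())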
+ """ + raise NotImplementedError("Cannot visit non-type: %s" % obj) + + +@visit_pyarrow.register(pa.Schema) +def _(obj: pa.Schema, visitor: PyArrowSchemaVisitor[T]) -> Optional[T]: + struct_results: List[Optional[T]] = [] + for field in obj: + visitor.before_field(field) + struct_result = visit_pyarrow(field.type, visitor) + visitor.after_field(field) + struct_results.append(struct_result) + + return visitor.schema(obj, struct_results) + + +@visit_pyarrow.register(pa.StructType) +def _(obj: pa.StructType, visitor: PyArrowSchemaVisitor[T]) -> Optional[T]: + struct_results: List[Optional[T]] = [] + for field in obj: + visitor.before_field(field) + struct_result = visit_pyarrow(field.type, visitor) + visitor.after_field(field) + struct_results.append(struct_result) + + return visitor.struct(obj, struct_results) + + +@visit_pyarrow.register(pa.ListType) +def _(obj: pa.ListType, visitor: PyArrowSchemaVisitor[T]) -> Optional[T]: + visitor.before_field(obj.value_field) + list_result = visit_pyarrow(obj.value_field.type, visitor) + visitor.after_field(obj.value_field) + return visitor.list(obj, list_result) + + +@visit_pyarrow.register(pa.MapType) +def _(obj: pa.MapType, visitor: PyArrowSchemaVisitor[T]) -> Optional[T]: + visitor.before_field(obj.key_field) + key_result = visit_pyarrow(obj.key_field.type, visitor) + visitor.after_field(obj.key_field) + visitor.before_field(obj.item_field) + value_result = visit_pyarrow(obj.item_field.type, visitor) + visitor.after_field(obj.item_field) + return visitor.map(obj, key_result, value_result) + + +@visit_pyarrow.register(pa.DataType) +def _(obj: pa.DataType, visitor: PyArrowSchemaVisitor[T]) -> Optional[T]: + if pa.types.is_nested(obj): + raise TypeError(f"Expected primitive type, got: {type(obj)}") + return visitor.primitive(obj) + + +class PyArrowSchemaVisitor(Generic[T], ABC): + def before_field(self, field: pa.Field) -> None: + """Override this method to perform an action immediately before visiting a field.""" + + def after_field(self, field: pa.Field) -> None: + """Override this method to perform an action immediately after visiting a field.""" + + @abstractmethod + def schema(self, schema: pa.Schema, field_results: List[Optional[T]]) -> Optional[T]: + """Visit a schema.""" + + @abstractmethod + def struct(self, struct: pa.StructType, field_results: List[Optional[T]]) -> Optional[T]: + """Visit a struct.""" + + @abstractmethod + def list(self, list_type: pa.ListType, element_result: Optional[T]) -> Optional[T]: + """Visit a list.""" + + @abstractmethod + def map(self, map_type: pa.MapType, key_result: Optional[T], value_result: Optional[T]) -> Optional[T]: + """Visit a map.""" + + @abstractmethod + def primitive(self, primitive: pa.DataType) -> Optional[T]: + """Visit a primitive type.""" + + +def _get_field_id(field: pa.Field) -> Optional[int]: + for pyarrow_field_id_key in PYARROW_FIELD_ID_KEYS: + if field_id_str := field.metadata.get(pyarrow_field_id_key): + return int(field_id_str.decode()) + return None + + +def _get_field_doc(field: pa.Field) -> Optional[str]: + for pyarrow_doc_key in PYARROW_FIELD_DOC_KEYS: + if doc_str := field.metadata.get(pyarrow_doc_key): + return doc_str.decode() + return None + + +class _ConvertToIceberg(PyArrowSchemaVisitor[Union[IcebergType, Schema]]): + def _convert_fields(self, arrow_fields: Iterable[pa.Field], field_results: List[Optional[IcebergType]]) -> List[NestedField]: + fields = [] + for i, field in enumerate(arrow_fields): + field_id = _get_field_id(field) + field_doc = _get_field_doc(field) + field_type 
= field_results[i] + if field_type is not None and field_id is not None: + fields.append(NestedField(field_id, field.name, field_type, required=not field.nullable, doc=field_doc)) + return fields + + def schema(self, schema: pa.Schema, field_results: List[Optional[IcebergType]]) -> Schema: + return Schema(*self._convert_fields(schema, field_results)) + + def struct(self, struct: pa.StructType, field_results: List[Optional[IcebergType]]) -> IcebergType: + return StructType(*self._convert_fields(struct, field_results)) + + def list(self, list_type: pa.ListType, element_result: Optional[IcebergType]) -> Optional[IcebergType]: + element_field = list_type.value_field + element_id = _get_field_id(element_field) + if element_result is not None and element_id is not None: + return ListType(element_id, element_result, element_required=not element_field.nullable) + return None + + def map( + self, map_type: pa.MapType, key_result: Optional[IcebergType], value_result: Optional[IcebergType] + ) -> Optional[IcebergType]: + key_field = map_type.key_field + key_id = _get_field_id(key_field) + value_field = map_type.item_field + value_id = _get_field_id(value_field) + if key_result is not None and value_result is not None and key_id is not None and value_id is not None: + return MapType(key_id, key_result, value_id, value_result, value_required=not value_field.nullable) + return None + + def primitive(self, primitive: pa.DataType) -> IcebergType: + if pa.types.is_boolean(primitive): + return BooleanType() + elif pa.types.is_int32(primitive): + return IntegerType() + elif pa.types.is_int64(primitive): + return LongType() + elif pa.types.is_float32(primitive): + return FloatType() + elif pa.types.is_float64(primitive): + return DoubleType() + elif isinstance(primitive, pa.Decimal128Type): + primitive = cast(pa.Decimal128Type, primitive) + return DecimalType(primitive.precision, primitive.scale) + elif pa.types.is_string(primitive): + return StringType() + elif pa.types.is_date32(primitive): + return DateType() + elif isinstance(primitive, pa.Time64Type) and primitive.unit == "us": + return TimeType() + elif pa.types.is_timestamp(primitive): + primitive = cast(pa.TimestampType, primitive) + if primitive.unit == "us": + if primitive.tz == "UTC" or primitive.tz == "+00:00": + return TimestamptzType() + elif primitive.tz is None: + return TimestampType() + elif pa.types.is_binary(primitive): + return BinaryType() + elif pa.types.is_fixed_size_binary(primitive): + primitive = cast(pa.FixedSizeBinaryType, primitive) + return FixedType(primitive.byte_width) + + raise TypeError(f"Unsupported type: {primitive}") + + +def _task_to_table( + fs: FileSystem, + task: FileScanTask, + bound_row_filter: BooleanExpression, + projected_schema: Schema, + projected_field_ids: Set[int], + positional_deletes: Optional[List[ChunkedArray]], + case_sensitive: bool, + row_counts: List[int], + limit: Optional[int] = None, +) -> Optional[pa.Table]: + if limit and sum(row_counts) >= limit: + return None + + _, _, path = PyArrowFileIO.parse_location(task.file.file_path) + arrow_format = ds.ParquetFileFormat(pre_buffer=True, buffer_size=(ONE_MEGABYTE * 8)) + with fs.open_input_file(path) as fin: + fragment = arrow_format.make_fragment(fin) + physical_schema = fragment.physical_schema + schema_raw = None + if metadata := physical_schema.metadata: + schema_raw = metadata.get(ICEBERG_SCHEMA) + # TODO: if field_ids are not present, Name Mapping should be implemented to look them up in the table schema, + # see 
https://github.com/apache/iceberg/issues/7451
+        file_schema = Schema.model_validate_json(schema_raw) if schema_raw is not None else pyarrow_to_schema(physical_schema)
+
+        if file_schema is None:
+            raise ValueError(f"Missing Iceberg schema in Metadata for file: {path}")
+
+        pyarrow_filter = None
+        if bound_row_filter is not AlwaysTrue():
+            translated_row_filter = translate_column_names(bound_row_filter, file_schema, case_sensitive=case_sensitive)
+            bound_file_filter = bind(file_schema, translated_row_filter, case_sensitive=case_sensitive)
+            pyarrow_filter = expression_to_pyarrow(bound_file_filter)
+
+        file_project_schema = prune_columns(file_schema, projected_field_ids, select_full_types=False)
+
+        fragment_scanner = ds.Scanner.from_fragment(
+            fragment=fragment,
+            schema=physical_schema,
+            # This will push down the query to Arrow.
+            # But in case there are positional deletes, we have to apply them first
+            filter=pyarrow_filter if not positional_deletes else None,
+            columns=[col.name for col in file_project_schema.columns],
+        )
+
+        if positional_deletes:
+            # Create the mask of indices that we're interested in
+            indices = _combine_positional_deletes(positional_deletes, fragment.count_rows())
+
+            if limit:
+                if pyarrow_filter is not None:
+                    # In case of the filter, we don't exactly know how many rows
+                    # we need to fetch upfront, can be optimized in the future:
+                    # https://github.com/apache/arrow/issues/35301
+                    arrow_table = fragment_scanner.take(indices)
+                    arrow_table = arrow_table.filter(pyarrow_filter)
+                    arrow_table = arrow_table.slice(0, limit)
+                else:
+                    arrow_table = fragment_scanner.take(indices[0:limit])
+            else:
+                arrow_table = fragment_scanner.take(indices)
+                # Apply the user filter
+                if pyarrow_filter is not None:
+                    arrow_table = arrow_table.filter(pyarrow_filter)
+        else:
+            # If there are no deletes, we can just take the head
+            # and the user-filter is already applied
+            if limit:
+                arrow_table = fragment_scanner.head(limit)
+            else:
+                arrow_table = fragment_scanner.to_table()
+
+        if len(arrow_table) < 1:
+            return None
+
+        if limit is not None and sum(row_counts) >= limit:
+            return None
+
+        row_counts.append(len(arrow_table))
+
+        return to_requested_schema(projected_schema, file_project_schema, arrow_table)
+
+
+def _read_all_delete_files(fs: FileSystem, tasks: Iterable[FileScanTask]) -> Dict[str, List[ChunkedArray]]:
+    deletes_per_file: Dict[str, List[ChunkedArray]] = {}
+    unique_deletes = set(chain.from_iterable([task.delete_files for task in tasks]))
+    if len(unique_deletes) > 0:
+        executor = ExecutorFactory.get_or_create()
+        deletes_per_files: Iterator[Dict[str, ChunkedArray]] = executor.map(
+            lambda args: _read_deletes(*args), [(fs, delete) for delete in unique_deletes]
+        )
+        for delete in deletes_per_files:
+            for file, arr in delete.items():
+                if file in deletes_per_file:
+                    deletes_per_file[file].append(arr)
+                else:
+                    deletes_per_file[file] = [arr]
+
+    return deletes_per_file
+
+
+def project_table(
+    tasks: Iterable[FileScanTask],
+    table: Table,
+    row_filter: BooleanExpression,
+    projected_schema: Schema,
+    case_sensitive: bool = True,
+    limit: Optional[int] = None,
+) -> pa.Table:
+    """Read the data files in the tasks into a pyarrow table, applying the row filter and projecting the requested schema.
+
+    Args:
+        tasks (Iterable[FileScanTask]): The file scan tasks to read from.
+        table (Table): The table that's being queried.
+        row_filter (BooleanExpression): The expression for filtering rows.
+        projected_schema (Schema): The output schema.
+        case_sensitive (bool): Case sensitivity when looking up column names.
+ limit (Optional[int]): Limit the number of records. + + Raises: + ResolveError: When an incompatible query is done. + """ + scheme, netloc, _ = PyArrowFileIO.parse_location(table.location()) + if isinstance(table.io, PyArrowFileIO): + fs = table.io.fs_by_scheme(scheme, netloc) + else: + try: + from pyiceberg.io.fsspec import FsspecFileIO + + if isinstance(table.io, FsspecFileIO): + from pyarrow.fs import PyFileSystem + + fs = PyFileSystem(FSSpecHandler(table.io.get_fs(scheme))) + else: + raise ValueError(f"Expected PyArrowFileIO or FsspecFileIO, got: {table.io}") + except ModuleNotFoundError as e: + # When FsSpec is not installed + raise ValueError(f"Expected PyArrowFileIO or FsspecFileIO, got: {table.io}") from e + + bound_row_filter = bind(table.schema(), row_filter, case_sensitive=case_sensitive) + + projected_field_ids = { + id for id in projected_schema.field_ids if not isinstance(projected_schema.find_type(id), (MapType, ListType)) + }.union(extract_field_ids(bound_row_filter)) + + row_counts: List[int] = [] + deletes_per_file = _read_all_delete_files(fs, tasks) + executor = ExecutorFactory.get_or_create() + futures = [ + executor.submit( + _task_to_table, + fs, + task, + bound_row_filter, + projected_schema, + projected_field_ids, + deletes_per_file.get(task.file.file_path), + case_sensitive, + row_counts, + limit, + ) + for task in tasks + ] + + # for consistent ordering, we need to maintain future order + futures_index = {f: i for i, f in enumerate(futures)} + completed_futures: SortedList[Future[pa.Table]] = SortedList(iterable=[], key=lambda f: futures_index[f]) + for future in concurrent.futures.as_completed(futures): + completed_futures.add(future) + + # stop early if limit is satisfied + if limit is not None and sum(row_counts) >= limit: + break + + # by now, we've either completed all tasks or satisfied the limit + if limit is not None: + _ = [f.cancel() for f in futures if not f.done()] + + tables = [f.result() for f in completed_futures if f.result()] + + if len(tables) < 1: + return pa.Table.from_batches([], schema=schema_to_pyarrow(projected_schema)) + + result = pa.concat_tables(tables) + + if limit is not None: + return result.slice(0, limit) + + return result + + +def to_requested_schema(requested_schema: Schema, file_schema: Schema, table: pa.Table) -> pa.Table: + struct_array = visit_with_partner(requested_schema, table, ArrowProjectionVisitor(file_schema), ArrowAccessor(file_schema)) + + arrays = [] + fields = [] + for pos, field in enumerate(requested_schema.fields): + array = struct_array.field(pos) + arrays.append(array) + fields.append(pa.field(field.name, array.type, field.optional)) + return pa.Table.from_arrays(arrays, schema=pa.schema(fields)) + + +class ArrowProjectionVisitor(SchemaWithPartnerVisitor[pa.Array, Optional[pa.Array]]): + file_schema: Schema + + def __init__(self, file_schema: Schema): + self.file_schema = file_schema + + def cast_if_needed(self, field: NestedField, values: pa.Array) -> pa.Array: + file_field = self.file_schema.find_field(field.field_id) + if field.field_type.is_primitive and field.field_type != file_field.field_type: + return values.cast(schema_to_pyarrow(promote(file_field.field_type, field.field_type))) + return values + + def schema(self, schema: Schema, schema_partner: Optional[pa.Array], struct_result: Optional[pa.Array]) -> Optional[pa.Array]: + return struct_result + + def struct( + self, struct: StructType, struct_array: Optional[pa.Array], field_results: List[Optional[pa.Array]] + ) -> Optional[pa.Array]: + if 
struct_array is None: + return None + field_arrays: List[pa.Array] = [] + fields: List[pa.Field] = [] + for field, field_array in zip(struct.fields, field_results): + if field_array is not None: + array = self.cast_if_needed(field, field_array) + field_arrays.append(array) + fields.append(pa.field(field.name, array.type, field.optional)) + elif field.optional: + arrow_type = schema_to_pyarrow(field.field_type) + field_arrays.append(pa.nulls(len(struct_array), type=arrow_type)) + fields.append(pa.field(field.name, arrow_type, field.optional)) + else: + raise ResolveError(f"Field is required, and could not be found in the file: {field}") + + return pa.StructArray.from_arrays(arrays=field_arrays, fields=pa.struct(fields)) + + def field(self, field: NestedField, _: Optional[pa.Array], field_array: Optional[pa.Array]) -> Optional[pa.Array]: + return field_array + + def list(self, list_type: ListType, list_array: Optional[pa.Array], value_array: Optional[pa.Array]) -> Optional[pa.Array]: + return ( + pa.ListArray.from_arrays(list_array.offsets, self.cast_if_needed(list_type.element_field, value_array)) + if isinstance(list_array, pa.ListArray) + else None + ) + + def map( + self, map_type: MapType, map_array: Optional[pa.Array], key_result: Optional[pa.Array], value_result: Optional[pa.Array] + ) -> Optional[pa.Array]: + return ( + pa.MapArray.from_arrays( + map_array.offsets, + self.cast_if_needed(map_type.key_field, key_result), + self.cast_if_needed(map_type.value_field, value_result), + ) + if isinstance(map_array, pa.MapArray) + else None + ) + + def primitive(self, _: PrimitiveType, array: Optional[pa.Array]) -> Optional[pa.Array]: + return array + + +class ArrowAccessor(PartnerAccessor[pa.Array]): + file_schema: Schema + + def __init__(self, file_schema: Schema): + self.file_schema = file_schema + + def schema_partner(self, partner: Optional[pa.Array]) -> Optional[pa.Array]: + return partner + + def field_partner(self, partner_struct: Optional[pa.Array], field_id: int, _: str) -> Optional[pa.Array]: + if partner_struct: + # use the field name from the file schema + try: + name = self.file_schema.find_field(field_id).name + except ValueError: + return None + + if isinstance(partner_struct, pa.StructArray): + return partner_struct.field(name) + elif isinstance(partner_struct, pa.Table): + return partner_struct.column(name).combine_chunks() + + return None + + def list_element_partner(self, partner_list: Optional[pa.Array]) -> Optional[pa.Array]: + return partner_list.values if isinstance(partner_list, pa.ListArray) else None + + def map_key_partner(self, partner_map: Optional[pa.Array]) -> Optional[pa.Array]: + return partner_map.keys if isinstance(partner_map, pa.MapArray) else None + + def map_value_partner(self, partner_map: Optional[pa.Array]) -> Optional[pa.Array]: + return partner_map.items if isinstance(partner_map, pa.MapArray) else None + + +_PRIMITIVE_TO_PHYSICAL = { + BooleanType(): "BOOLEAN", + IntegerType(): "INT32", + LongType(): "INT64", + FloatType(): "FLOAT", + DoubleType(): "DOUBLE", + DateType(): "INT32", + TimeType(): "INT64", + TimestampType(): "INT64", + TimestamptzType(): "INT64", + StringType(): "BYTE_ARRAY", + UUIDType(): "FIXED_LEN_BYTE_ARRAY", + BinaryType(): "BYTE_ARRAY", +} +_PHYSICAL_TYPES = set(_PRIMITIVE_TO_PHYSICAL.values()).union({"INT96"}) + + +class StatsAggregator: + current_min: Any + current_max: Any + trunc_length: Optional[int] + + def __init__(self, iceberg_type: PrimitiveType, physical_type_string: str, trunc_length: Optional[int] = None) -> None: 
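+        # A StatsAggregator tracks the running min/max for one column across row
+        # groups and serializes them to Iceberg bound bytes via to_bytes. The
+        # checks below reject unknown or unsupported physical types and guard
+        # against the Parquet physical type disagreeing with the expected
+        # encoding of the Iceberg type (e.g. LongType must be stored as INT64).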
+ self.current_min = None + self.current_max = None + self.trunc_length = trunc_length + + if physical_type_string not in _PHYSICAL_TYPES: + raise ValueError(f"Unknown physical type {physical_type_string}") + + if physical_type_string == "INT96": + raise NotImplementedError("Statistics not implemented for INT96 physical type") + + expected_physical_type = _PRIMITIVE_TO_PHYSICAL[iceberg_type] + if expected_physical_type != physical_type_string: + raise ValueError( + f"Unexpected physical type {physical_type_string} for {iceberg_type}, expected {expected_physical_type}" + ) + + self.primitive_type = iceberg_type + + def serialize(self, value: Any) -> bytes: + return to_bytes(self.primitive_type, value) + + def update_min(self, val: Any) -> None: + self.current_min = val if self.current_min is None else min(val, self.current_min) + + def update_max(self, val: Any) -> None: + self.current_max = val if self.current_max is None else max(val, self.current_max) + + def min_as_bytes(self) -> bytes: + return self.serialize( + self.current_min + if self.trunc_length is None + else TruncateTransform(width=self.trunc_length).transform(self.primitive_type)(self.current_min) + ) + + def max_as_bytes(self) -> Optional[bytes]: + if self.current_max is None: + return None + + if self.primitive_type == StringType(): + if not isinstance(self.current_max, str): + raise ValueError("Expected the current_max to be a string") + s_result = truncate_upper_bound_text_string(self.current_max, self.trunc_length) + return self.serialize(s_result) if s_result is not None else None + elif self.primitive_type == BinaryType(): + if not isinstance(self.current_max, bytes): + raise ValueError("Expected the current_max to be bytes") + b_result = truncate_upper_bound_binary_string(self.current_max, self.trunc_length) + return self.serialize(b_result) if b_result is not None else None + else: + if self.trunc_length is not None: + raise ValueError(f"{self.primitive_type} cannot be truncated") + return self.serialize(self.current_max) + + +DEFAULT_TRUNCATION_LENGTH = 16 +TRUNCATION_EXPR = r"^truncate\((\d+)\)$" + + +class MetricModeTypes(Enum): + TRUNCATE = "truncate" + NONE = "none" + COUNTS = "counts" + FULL = "full" + + +DEFAULT_METRICS_MODE_KEY = "write.metadata.metrics.default" +COLUMN_METRICS_MODE_KEY_PREFIX = "write.metadata.metrics.column" + + +@dataclass(frozen=True) +class MetricsMode(Singleton): + type: MetricModeTypes + length: Optional[int] = None + + +_DEFAULT_METRICS_MODE = MetricsMode(MetricModeTypes.TRUNCATE, DEFAULT_TRUNCATION_LENGTH) + + +def match_metrics_mode(mode: str) -> MetricsMode: + sanitized_mode = mode.strip().lower() + if sanitized_mode.startswith("truncate"): + m = re.match(TRUNCATION_EXPR, sanitized_mode) + if m: + length = int(m[1]) + if length < 1: + raise ValueError("Truncation length must be larger than 0") + return MetricsMode(MetricModeTypes.TRUNCATE, int(m[1])) + else: + raise ValueError(f"Malformed truncate: {mode}") + elif sanitized_mode == "none": + return MetricsMode(MetricModeTypes.NONE) + elif sanitized_mode == "counts": + return MetricsMode(MetricModeTypes.COUNTS) + elif sanitized_mode == "full": + return MetricsMode(MetricModeTypes.FULL) + else: + raise ValueError(f"Unsupported metrics mode: {mode}") + + +@dataclass(frozen=True) +class StatisticsCollector: + field_id: int + iceberg_type: PrimitiveType + mode: MetricsMode + column_name: str + + +class PyArrowStatisticsCollector(PreOrderSchemaVisitor[List[StatisticsCollector]]): + _field_id: int = 0 + _schema: Schema + _properties: 
Dict[str, str] + _default_mode: Optional[str] + + def __init__(self, schema: Schema, properties: Dict[str, str]): + self._schema = schema + self._properties = properties + self._default_mode = self._properties.get(DEFAULT_METRICS_MODE_KEY) + + def schema(self, schema: Schema, struct_result: Callable[[], List[StatisticsCollector]]) -> List[StatisticsCollector]: + return struct_result() + + def struct( + self, struct: StructType, field_results: List[Callable[[], List[StatisticsCollector]]] + ) -> List[StatisticsCollector]: + return list(chain(*[result() for result in field_results])) + + def field(self, field: NestedField, field_result: Callable[[], List[StatisticsCollector]]) -> List[StatisticsCollector]: + self._field_id = field.field_id + return field_result() + + def list(self, list_type: ListType, element_result: Callable[[], List[StatisticsCollector]]) -> List[StatisticsCollector]: + self._field_id = list_type.element_id + return element_result() + + def map( + self, + map_type: MapType, + key_result: Callable[[], List[StatisticsCollector]], + value_result: Callable[[], List[StatisticsCollector]], + ) -> List[StatisticsCollector]: + self._field_id = map_type.key_id + k = key_result() + self._field_id = map_type.value_id + v = value_result() + return k + v + + def primitive(self, primitive: PrimitiveType) -> List[StatisticsCollector]: + column_name = self._schema.find_column_name(self._field_id) + if column_name is None: + return [] + + metrics_mode = _DEFAULT_METRICS_MODE + + if self._default_mode: + metrics_mode = match_metrics_mode(self._default_mode) + + col_mode = self._properties.get(f"{COLUMN_METRICS_MODE_KEY_PREFIX}.{column_name}") + if col_mode: + metrics_mode = match_metrics_mode(col_mode) + + if ( + not (isinstance(primitive, StringType) or isinstance(primitive, BinaryType)) + and metrics_mode.type == MetricModeTypes.TRUNCATE + ): + metrics_mode = MetricsMode(MetricModeTypes.FULL) + + is_nested = column_name.find(".") >= 0 + + if is_nested and metrics_mode.type in [MetricModeTypes.TRUNCATE, MetricModeTypes.FULL]: + metrics_mode = MetricsMode(MetricModeTypes.COUNTS) + + return [StatisticsCollector(field_id=self._field_id, iceberg_type=primitive, mode=metrics_mode, column_name=column_name)] + + +def compute_statistics_plan( + schema: Schema, + table_properties: Dict[str, str], +) -> Dict[int, StatisticsCollector]: + """ + Compute the statistics plan for all columns. + + The resulting list is assumed to have the same length and same order as the columns in the pyarrow table. + This allows the list to map from the column index to the Iceberg column ID. + For each element, the desired metrics collection that was provided by the user in the configuration + is computed and then adjusted according to the data type of the column. For nested columns the minimum + and maximum values are not computed. And truncation is only applied to text of binary strings. + + Args: + table_properties (from pyiceberg.table.metadata.TableMetadata): The Iceberg table metadata properties. + They are required to compute the mapping of column position to iceberg schema type id. 
It's also + used to set the mode for column metrics collection + """ + stats_cols = pre_order_visit(schema, PyArrowStatisticsCollector(schema, table_properties)) + result: Dict[int, StatisticsCollector] = {} + for stats_col in stats_cols: + result[stats_col.field_id] = stats_col + return result + + +@dataclass(frozen=True) +class ID2ParquetPath: + field_id: int + parquet_path: str + + +class ID2ParquetPathVisitor(PreOrderSchemaVisitor[List[ID2ParquetPath]]): + _field_id: int = 0 + _path: List[str] + + def __init__(self) -> None: + self._path = [] + + def schema(self, schema: Schema, struct_result: Callable[[], List[ID2ParquetPath]]) -> List[ID2ParquetPath]: + return struct_result() + + def struct(self, struct: StructType, field_results: List[Callable[[], List[ID2ParquetPath]]]) -> List[ID2ParquetPath]: + return list(chain(*[result() for result in field_results])) + + def field(self, field: NestedField, field_result: Callable[[], List[ID2ParquetPath]]) -> List[ID2ParquetPath]: + self._field_id = field.field_id + self._path.append(field.name) + result = field_result() + self._path.pop() + return result + + def list(self, list_type: ListType, element_result: Callable[[], List[ID2ParquetPath]]) -> List[ID2ParquetPath]: + self._field_id = list_type.element_id + self._path.append("list.element") + result = element_result() + self._path.pop() + return result + + def map( + self, + map_type: MapType, + key_result: Callable[[], List[ID2ParquetPath]], + value_result: Callable[[], List[ID2ParquetPath]], + ) -> List[ID2ParquetPath]: + self._field_id = map_type.key_id + self._path.append("key_value.key") + k = key_result() + self._path.pop() + self._field_id = map_type.value_id + self._path.append("key_value.value") + v = value_result() + self._path.pop() + return k + v + + def primitive(self, primitive: PrimitiveType) -> List[ID2ParquetPath]: + return [ID2ParquetPath(field_id=self._field_id, parquet_path=".".join(self._path))] + + +def parquet_path_to_id_mapping( + schema: Schema, +) -> Dict[str, int]: + """ + Compute the mapping of parquet column path to Iceberg ID. + + For each column, the parquet file metadata has a path_in_schema attribute that follows + a specific naming scheme for nested columnds. This function computes a mapping of + the full paths to the corresponding Iceberg IDs. + + Args: + schema (pyiceberg.schema.Schema): The current table schema. + """ + result: Dict[str, int] = {} + for pair in pre_order_visit(schema, ID2ParquetPathVisitor()): + result[pair.parquet_path] = pair.field_id + return result + + +def fill_parquet_file_metadata( + df: DataFile, + parquet_metadata: pq.FileMetaData, + file_size: int, + stats_columns: Dict[int, StatisticsCollector], + parquet_column_mapping: Dict[str, int], +) -> None: + """ + Compute and fill the following fields of the DataFile object. + + - file_format + - record_count + - file_size_in_bytes + - column_sizes + - value_counts + - null_value_counts + - nan_value_counts + - lower_bounds + - upper_bounds + - split_offsets + + Args: + df (DataFile): A DataFile object representing the Parquet file for which metadata is to be filled. + parquet_metadata (pyarrow.parquet.FileMetaData): A pyarrow metadata object. + file_size (int): The total compressed file size cannot be retrieved from the metadata and hence has to + be passed here. Depending on the kind of file system and pyarrow library call used, different + ways to obtain this value might be appropriate. + stats_columns (Dict[int, StatisticsCollector]): The statistics gathering plan. 
It is required to + set the mode for column metrics collection + """ + if parquet_metadata.num_columns != len(stats_columns): + raise ValueError( + f"Number of columns in statistics configuration ({len(stats_columns)}) is different from the number of columns in pyarrow table ({parquet_metadata.num_columns})" + ) + + if parquet_metadata.num_columns != len(parquet_column_mapping): + raise ValueError( + f"Number of columns in column mapping ({len(parquet_column_mapping)}) is different from the number of columns in pyarrow table ({parquet_metadata.num_columns})" + ) + + column_sizes: Dict[int, int] = {} + value_counts: Dict[int, int] = {} + split_offsets: List[int] = [] + + null_value_counts: Dict[int, int] = {} + nan_value_counts: Dict[int, int] = {} + + col_aggs = {} + + for r in range(parquet_metadata.num_row_groups): + # References: + # https://github.com/apache/iceberg/blob/fc381a81a1fdb8f51a0637ca27cd30673bd7aad3/parquet/src/main/java/org/apache/iceberg/parquet/ParquetUtil.java#L232 + # https://github.com/apache/parquet-mr/blob/ac29db4611f86a07cc6877b416aa4b183e09b353/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/metadata/ColumnChunkMetaData.java#L184 + + row_group = parquet_metadata.row_group(r) + + data_offset = row_group.column(0).data_page_offset + dictionary_offset = row_group.column(0).dictionary_page_offset + + if row_group.column(0).has_dictionary_page and dictionary_offset < data_offset: + split_offsets.append(dictionary_offset) + else: + split_offsets.append(data_offset) + + invalidate_col: Set[int] = set() + + for pos in range(0, parquet_metadata.num_columns): + column = row_group.column(pos) + field_id = parquet_column_mapping[column.path_in_schema] + + stats_col = stats_columns[field_id] + + column_sizes.setdefault(field_id, 0) + column_sizes[field_id] += column.total_compressed_size + + if stats_col.mode == MetricsMode(MetricModeTypes.NONE): + continue + + value_counts[field_id] = value_counts.get(field_id, 0) + column.num_values + + if column.is_stats_set: + try: + statistics = column.statistics + + if statistics.has_null_count: + null_value_counts[field_id] = null_value_counts.get(field_id, 0) + statistics.null_count + + if stats_col.mode == MetricsMode(MetricModeTypes.COUNTS): + continue + + if field_id not in col_aggs: + col_aggs[field_id] = StatsAggregator( + stats_col.iceberg_type, statistics.physical_type, stats_col.mode.length + ) + + col_aggs[field_id].update_min(statistics.min) + col_aggs[field_id].update_max(statistics.max) + + except pyarrow.lib.ArrowNotImplementedError as e: + invalidate_col.add(field_id) + logger.warning(e) + else: + invalidate_col.add(field_id) + logger.warning("PyArrow statistics missing for column %d when writing file", pos) + + split_offsets.sort() + + lower_bounds = {} + upper_bounds = {} + + for k, agg in col_aggs.items(): + _min = agg.min_as_bytes() + if _min is not None: + lower_bounds[k] = _min + _max = agg.max_as_bytes() + if _max is not None: + upper_bounds[k] = _max + + for field_id in invalidate_col: + del lower_bounds[field_id] + del upper_bounds[field_id] + del null_value_counts[field_id] + + df.file_format = FileFormat.PARQUET + df.record_count = parquet_metadata.num_rows + df.file_size_in_bytes = file_size + df.column_sizes = column_sizes + df.value_counts = value_counts + df.null_value_counts = null_value_counts + df.nan_value_counts = nan_value_counts + df.lower_bounds = lower_bounds + df.upper_bounds = upper_bounds + df.split_offsets = split_offsets diff --git a/pyiceberg/manifest.py b/pyiceberg/manifest.py new 
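Taken together, the three helpers above make up the metrics path for a freshly written Parquet file: compute_statistics_plan decides per-column metric modes, parquet_path_to_id_mapping ties Parquet column paths back to Iceberg field IDs, and fill_parquet_file_metadata folds the footer statistics into a DataFile. Below is a minimal sketch of how they compose, assuming a local file and the write.metadata.metrics.* table-property keys consumed by match_metrics_mode above; the path and property value are illustrative.

```python
import os

import pyarrow as pa
import pyarrow.parquet as pq

from pyiceberg.io.pyarrow import (
    compute_statistics_plan,
    fill_parquet_file_metadata,
    parquet_path_to_id_mapping,
)
from pyiceberg.manifest import DataFile
from pyiceberg.schema import Schema
from pyiceberg.types import LongType, NestedField, StringType

schema = Schema(
    NestedField(1, "id", LongType(), required=True),
    NestedField(2, "name", StringType(), required=False),
)
# Default mode (truncate(16)) applies to `id`, which is promoted to FULL because
# it is not a string/binary column; `name` is limited to counts only.
properties = {"write.metadata.metrics.column.name": "counts"}

path = "/tmp/example.parquet"
pq.write_table(pa.table({"id": [1, 2, 3], "name": ["a", "b", None]}), path)

datafile = DataFile()
fill_parquet_file_metadata(
    df=datafile,
    parquet_metadata=pq.read_metadata(path),
    file_size=os.path.getsize(path),  # not recoverable from the footer, see the docstring
    stats_columns=compute_statistics_plan(schema, properties),
    parquet_column_mapping=parquet_path_to_id_mapping(schema),
)
assert datafile.value_counts == {1: 3, 2: 3}  # value counts include nulls
assert 2 not in datafile.lower_bounds  # counts mode keeps no bounds for `name`
```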
file mode 100644 index 0000000000..8bdbfd3524 --- /dev/null +++ b/pyiceberg/manifest.py @@ -0,0 +1,861 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from __future__ import annotations + +import math +from abc import ABC, abstractmethod +from enum import Enum +from functools import singledispatch +from types import TracebackType +from typing import ( + Any, + Dict, + Iterator, + List, + Literal, + Optional, + Type, +) + +from pyiceberg.avro.file import AvroFile, AvroOutputFile +from pyiceberg.conversions import to_bytes +from pyiceberg.exceptions import ValidationError +from pyiceberg.io import FileIO, InputFile, OutputFile +from pyiceberg.partitioning import PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.typedef import Record +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + IcebergType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + PrimitiveType, + StringType, + StructType, + TimestampType, + TimestamptzType, + TimeType, +) + +UNASSIGNED_SEQ = -1 +DEFAULT_BLOCK_SIZE = 67108864 # 64 * 1024 * 1024 + + +class DataFileContent(int, Enum): + DATA = 0 + POSITION_DELETES = 1 + EQUALITY_DELETES = 2 + + def __repr__(self) -> str: + """Return the string representation of the DataFileContent class.""" + return f"DataFileContent.{self.name}" + + +class ManifestContent(int, Enum): + DATA = 0 + DELETES = 1 + + def __repr__(self) -> str: + """Return the string representation of the ManifestContent class.""" + return f"ManifestContent.{self.name}" + + +class ManifestEntryStatus(int, Enum): + EXISTING = 0 + ADDED = 1 + DELETED = 2 + + def __repr__(self) -> str: + """Return the string representation of the ManifestEntryStatus class.""" + return f"ManifestEntryStatus.{self.name}" + + +class FileFormat(str, Enum): + AVRO = "AVRO" + PARQUET = "PARQUET" + ORC = "ORC" + + def __repr__(self) -> str: + """Return the string representation of the FileFormat class.""" + return f"FileFormat.{self.name}" + + +DATA_FILE_TYPE_V1 = StructType( + NestedField( + field_id=134, + name="content", + field_type=IntegerType(), + required=False, + doc="Contents of the file: 0=data, 1=position deletes, 2=equality deletes", + initial_default=DataFileContent.DATA, + ), + NestedField(field_id=100, name="file_path", field_type=StringType(), required=True, doc="Location URI with FS scheme"), + NestedField( + field_id=101, + name="file_format", + field_type=StringType(), + required=True, + doc="File format name: avro, orc, or parquet", + ), + NestedField( + field_id=102, + name="partition", + field_type=StructType(), + required=True, + doc="Partition data tuple, schema based on the partition spec", + ), + NestedField(field_id=103, name="record_count", field_type=LongType(), required=True, doc="Number of records in the file"), + 
NestedField(field_id=104, name="file_size_in_bytes", field_type=LongType(), required=True, doc="Total file size in bytes"), + NestedField( + field_id=105, + name="block_size_in_bytes", + field_type=LongType(), + required=False, + doc="Deprecated. Always write a default in v1. Do not write in v2.", + ), + NestedField( + field_id=108, + name="column_sizes", + field_type=MapType(key_id=117, key_type=IntegerType(), value_id=118, value_type=LongType()), + required=False, + doc="Map of column id to total size on disk", + ), + NestedField( + field_id=109, + name="value_counts", + field_type=MapType(key_id=119, key_type=IntegerType(), value_id=120, value_type=LongType()), + required=False, + doc="Map of column id to total count, including null and NaN", + ), + NestedField( + field_id=110, + name="null_value_counts", + field_type=MapType(key_id=121, key_type=IntegerType(), value_id=122, value_type=LongType()), + required=False, + doc="Map of column id to null value count", + ), + NestedField( + field_id=137, + name="nan_value_counts", + field_type=MapType(key_id=138, key_type=IntegerType(), value_id=139, value_type=LongType()), + required=False, + doc="Map of column id to number of NaN values in the column", + ), + NestedField( + field_id=125, + name="lower_bounds", + field_type=MapType(key_id=126, key_type=IntegerType(), value_id=127, value_type=BinaryType()), + required=False, + doc="Map of column id to lower bound", + ), + NestedField( + field_id=128, + name="upper_bounds", + field_type=MapType(key_id=129, key_type=IntegerType(), value_id=130, value_type=BinaryType()), + required=False, + doc="Map of column id to upper bound", + ), + NestedField(field_id=131, name="key_metadata", field_type=BinaryType(), required=False, doc="Encryption key metadata blob"), + NestedField( + field_id=132, + name="split_offsets", + field_type=ListType(element_id=133, element_type=LongType(), element_required=True), + required=False, + doc="Splittable offsets", + ), + NestedField( + field_id=135, + name="equality_ids", + field_type=ListType(element_id=136, element_type=LongType(), element_required=True), + required=False, + doc="Equality comparison field IDs", + ), + NestedField(field_id=140, name="sort_order_id", field_type=IntegerType(), required=False, doc="Sort order ID"), + NestedField(field_id=141, name="spec_id", field_type=IntegerType(), required=False, doc="Partition spec ID"), +) + +DATA_FILE_TYPE_V2 = StructType(*[field for field in DATA_FILE_TYPE_V1.fields if field.field_id != 105]) + + +@singledispatch +def partition_field_to_data_file_partition_field(partition_field_type: IcebergType) -> PrimitiveType: + raise TypeError(f"Unsupported partition field type: {partition_field_type}") + + +@partition_field_to_data_file_partition_field.register(LongType) +@partition_field_to_data_file_partition_field.register(DateType) +@partition_field_to_data_file_partition_field.register(TimeType) +@partition_field_to_data_file_partition_field.register(TimestampType) +@partition_field_to_data_file_partition_field.register(TimestamptzType) +def _(partition_field_type: PrimitiveType) -> IntegerType: + return IntegerType() + + +@partition_field_to_data_file_partition_field.register(PrimitiveType) +def _(partition_field_type: PrimitiveType) -> PrimitiveType: + return partition_field_type + + +def data_file_with_partition(partition_type: StructType, format_version: Literal[1, 2]) -> StructType: + data_file_partition_type = StructType( + *[ + NestedField( + field_id=field.field_id, + name=field.name, + 
field_type=partition_field_to_data_file_partition_field(field.field_type), + ) + for field in partition_type.fields + ] + ) + + return StructType( + *[ + NestedField( + field_id=102, + name="partition", + field_type=data_file_partition_type, + required=True, + doc="Partition data tuple, schema based on the partition spec", + ) + if field.field_id == 102 + else field + for field in (DATA_FILE_TYPE_V1.fields if format_version == 1 else DATA_FILE_TYPE_V2.fields) + ] + ) + + +class DataFile(Record): + __slots__ = ( + "content", + "file_path", + "file_format", + "partition", + "record_count", + "file_size_in_bytes", + "block_size_in_bytes", + "column_sizes", + "value_counts", + "null_value_counts", + "nan_value_counts", + "lower_bounds", + "upper_bounds", + "key_metadata", + "split_offsets", + "equality_ids", + "sort_order_id", + "spec_id", + ) + content: DataFileContent + file_path: str + file_format: FileFormat + partition: Record + record_count: int + file_size_in_bytes: int + block_size_in_bytes: Optional[int] + column_sizes: Dict[int, int] + value_counts: Dict[int, int] + null_value_counts: Dict[int, int] + nan_value_counts: Dict[int, int] + lower_bounds: Dict[int, bytes] + upper_bounds: Dict[int, bytes] + key_metadata: Optional[bytes] + split_offsets: Optional[List[int]] + equality_ids: Optional[List[int]] + sort_order_id: Optional[int] + spec_id: Optional[int] + + def __setattr__(self, name: str, value: Any) -> None: + """Assign a key/value to a DataFile.""" + # The file_format is written as a string, so we need to cast it to the Enum + if name == "file_format": + value = FileFormat[value] + super().__setattr__(name, value) + + def __init__(self, format_version: Literal[1, 2] = 1, *data: Any, **named_data: Any) -> None: + super().__init__( + *data, + **{"struct": DATA_FILE_TYPE_V1 if format_version == 1 else DATA_FILE_TYPE_V2, **named_data}, + ) + + def __hash__(self) -> int: + """Return the hash of the file path.""" + return hash(self.file_path) + + def __eq__(self, other: Any) -> bool: + """Compare the datafile with another object. + + If it is a datafile, it will compare based on the file_path. 
+ """ + return self.file_path == other.file_path if isinstance(other, DataFile) else False + + +MANIFEST_ENTRY_SCHEMA = Schema( + NestedField(0, "status", IntegerType(), required=True), + NestedField(1, "snapshot_id", LongType(), required=False), + NestedField(3, "data_sequence_number", LongType(), required=False), + NestedField(4, "file_sequence_number", LongType(), required=False), + NestedField(2, "data_file", DATA_FILE_TYPE_V1, required=True), +) + +MANIFEST_ENTRY_SCHEMA_STRUCT = MANIFEST_ENTRY_SCHEMA.as_struct() + + +def manifest_entry_schema_with_data_file(data_file: StructType) -> Schema: + return Schema( + *[ + NestedField(2, "data_file", data_file, required=True) if field.field_id == 2 else field + for field in MANIFEST_ENTRY_SCHEMA.fields + ] + ) + + +class ManifestEntry(Record): + __slots__ = ("status", "snapshot_id", "data_sequence_number", "file_sequence_number", "data_file") + status: ManifestEntryStatus + snapshot_id: Optional[int] + data_sequence_number: Optional[int] + file_sequence_number: Optional[int] + data_file: DataFile + + def __init__(self, *data: Any, **named_data: Any) -> None: + super().__init__(*data, **{"struct": MANIFEST_ENTRY_SCHEMA_STRUCT, **named_data}) + + +PARTITION_FIELD_SUMMARY_TYPE = StructType( + NestedField(509, "contains_null", BooleanType(), required=True), + NestedField(518, "contains_nan", BooleanType(), required=False), + NestedField(510, "lower_bound", BinaryType(), required=False), + NestedField(511, "upper_bound", BinaryType(), required=False), +) + + +class PartitionFieldSummary(Record): + __slots__ = ("contains_null", "contains_nan", "lower_bound", "upper_bound") + contains_null: bool + contains_nan: Optional[bool] + lower_bound: Optional[bytes] + upper_bound: Optional[bytes] + + def __init__(self, *data: Any, **named_data: Any) -> None: + super().__init__(*data, **{"struct": PARTITION_FIELD_SUMMARY_TYPE, **named_data}) + + +class PartitionFieldStats: + _type: PrimitiveType + _contains_null: bool + _contains_nan: bool + _min: Optional[Any] + _max: Optional[Any] + + def __init__(self, iceberg_type: PrimitiveType) -> None: + self._type = iceberg_type + self._contains_null = False + self._contains_nan = False + self._min = None + self._max = None + + def to_summary(self) -> PartitionFieldSummary: + return PartitionFieldSummary( + contains_null=self._contains_null, + contains_nan=self._contains_nan, + lower_bound=to_bytes(self._type, self._min) if self._min is not None else None, + upper_bound=to_bytes(self._type, self._max) if self._max is not None else None, + ) + + def update(self, value: Any) -> None: + if value is None: + self._contains_null = True + elif isinstance(value, float) and math.isnan(value): + self._contains_nan = True + else: + if self._min is None: + self._min = value + self._max = value + else: + self._max = max(self._max, value) + self._min = min(self._min, value) + + +def construct_partition_summaries(spec: PartitionSpec, schema: Schema, partitions: List[Record]) -> List[PartitionFieldSummary]: + types = [field.field_type for field in spec.partition_type(schema).fields] + field_stats = [PartitionFieldStats(field_type) for field_type in types] + for partition_keys in partitions: + for i, field_type in enumerate(types): + if not isinstance(field_type, PrimitiveType): + raise ValueError(f"Expected a primitive type for the partition field, got {field_type}") + partition_key = partition_keys[i] + field_stats[i].update(partition_key) + return [field.to_summary() for field in field_stats] + + +MANIFEST_FILE_SCHEMA: Schema = 
Schema( + NestedField(500, "manifest_path", StringType(), required=True, doc="Location URI with FS scheme"), + NestedField(501, "manifest_length", LongType(), required=True), + NestedField(502, "partition_spec_id", IntegerType(), required=True), + NestedField(517, "content", IntegerType(), required=False, initial_default=ManifestContent.DATA), + NestedField(515, "sequence_number", LongType(), required=False, initial_default=0), + NestedField(516, "min_sequence_number", LongType(), required=False, initial_default=0), + NestedField(503, "added_snapshot_id", LongType(), required=False), + NestedField(504, "added_files_count", IntegerType(), required=False), + NestedField(505, "existing_files_count", IntegerType(), required=False), + NestedField(506, "deleted_files_count", IntegerType(), required=False), + NestedField(512, "added_rows_count", LongType(), required=False), + NestedField(513, "existing_rows_count", LongType(), required=False), + NestedField(514, "deleted_rows_count", LongType(), required=False), + NestedField(507, "partitions", ListType(508, PARTITION_FIELD_SUMMARY_TYPE, element_required=True), required=False), + NestedField(519, "key_metadata", BinaryType(), required=False), +) + +MANIFEST_FILE_SCHEMA_STRUCT = MANIFEST_FILE_SCHEMA.as_struct() + +POSITIONAL_DELETE_SCHEMA = Schema( + NestedField(2147483546, "file_path", StringType()), NestedField(2147483545, "pos", IntegerType()) +) + + +class ManifestFile(Record): + __slots__ = ( + "manifest_path", + "manifest_length", + "partition_spec_id", + "content", + "sequence_number", + "min_sequence_number", + "added_snapshot_id", + "added_files_count", + "existing_files_count", + "deleted_files_count", + "added_rows_count", + "existing_rows_count", + "deleted_rows_count", + "partitions", + "key_metadata", + ) + manifest_path: str + manifest_length: int + partition_spec_id: int + content: ManifestContent + sequence_number: int + min_sequence_number: int + added_snapshot_id: int + added_files_count: Optional[int] + existing_files_count: Optional[int] + deleted_files_count: Optional[int] + added_rows_count: Optional[int] + existing_rows_count: Optional[int] + deleted_rows_count: Optional[int] + partitions: Optional[List[PartitionFieldSummary]] + key_metadata: Optional[bytes] + + def __init__(self, *data: Any, **named_data: Any) -> None: + super().__init__(*data, **{"struct": MANIFEST_FILE_SCHEMA_STRUCT, **named_data}) + + def has_added_files(self) -> bool: + return self.added_files_count is None or self.added_files_count > 0 + + def has_existing_files(self) -> bool: + return self.existing_files_count is None or self.existing_files_count > 0 + + def fetch_manifest_entry(self, io: FileIO, discard_deleted: bool = True) -> List[ManifestEntry]: + """ + Read the manifest entries from the manifest file. + + Args: + io: The FileIO to fetch the file. + discard_deleted: When True, filter out entries with DELETED status and keep only live entries. + + Returns: + A list of manifest entries. + """ + input_file = io.new_input(self.manifest_path) + with AvroFile[ManifestEntry]( + input_file, + MANIFEST_ENTRY_SCHEMA, + read_types={-1: ManifestEntry, 2: DataFile}, + read_enums={0: ManifestEntryStatus, 101: FileFormat, 134: DataFileContent}, + ) as reader: + return [ + _inherit_sequence_number(entry, self) + for entry in reader + if not discard_deleted or entry.status != ManifestEntryStatus.DELETED + ] + + +def read_manifest_list(input_file: InputFile) -> Iterator[ManifestFile]: + """ + Read the manifests from the manifest list. + + Args: + input_file: The input file where the stream can be read from.
+ + Returns: + An iterator of ManifestFiles that are part of the list. + """ + with AvroFile[ManifestFile]( + input_file, + MANIFEST_FILE_SCHEMA, + read_types={-1: ManifestFile, 508: PartitionFieldSummary}, + read_enums={517: ManifestContent}, + ) as reader: + yield from reader + + +def _inherit_sequence_number(entry: ManifestEntry, manifest: ManifestFile) -> ManifestEntry: + """Inherits the sequence numbers. + + More information in the spec: https://iceberg.apache.org/spec/#sequence-number-inheritance + + Args: + entry: The manifest entry that has null sequence numbers. + manifest: The manifest that has a sequence number. + + Returns: + The manifest entry with the sequence numbers set. + """ + # The snapshot_id is required in V1, inherit with V2 when null + if entry.snapshot_id is None: + entry.snapshot_id = manifest.added_snapshot_id + + # in v1 tables, the data sequence number is not persisted and can be safely defaulted to 0 + # in v2 tables, the data sequence number should be inherited iff the entry status is ADDED + if entry.data_sequence_number is None and (manifest.sequence_number == 0 or entry.status == ManifestEntryStatus.ADDED): + entry.data_sequence_number = manifest.sequence_number + + # in v1 tables, the file sequence number is not persisted and can be safely defaulted to 0 + # in v2 tables, the file sequence number should be inherited iff the entry status is ADDED + if entry.file_sequence_number is None and (manifest.sequence_number == 0 or entry.status == ManifestEntryStatus.ADDED): + # Only available in V2, always 0 in V1 + entry.file_sequence_number = manifest.sequence_number + + return entry + + +class ManifestWriter(ABC): + closed: bool + _spec: PartitionSpec + _schema: Schema + _output_file: OutputFile + _writer: AvroOutputFile[ManifestEntry] + _snapshot_id: int + _meta: Dict[str, str] + _added_files: int + _added_rows: int + _existing_files: int + _existing_rows: int + _deleted_files: int + _deleted_rows: int + _min_data_sequence_number: Optional[int] + _partitions: List[Record] + + def __init__(self, spec: PartitionSpec, schema: Schema, output_file: OutputFile, snapshot_id: int, meta: Dict[str, str]): + self.closed = False + self._spec = spec + self._schema = schema + self._output_file = output_file + self._snapshot_id = snapshot_id + self._meta = meta + + self._added_files = 0 + self._added_rows = 0 + self._existing_files = 0 + self._existing_rows = 0 + self._deleted_files = 0 + self._deleted_rows = 0 + self._min_data_sequence_number = None + self._partitions = [] + + def __enter__(self) -> ManifestWriter: + """Open the writer.""" + self._writer = self.new_writer() + self._writer.__enter__() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + """Close the writer.""" + self.closed = True + self._writer.__exit__(exc_type, exc_value, traceback) + + @abstractmethod + def content(self) -> ManifestContent: + ... + + @abstractmethod + def new_writer(self) -> AvroOutputFile[ManifestEntry]: + ... + + @abstractmethod + def prepare_entry(self, entry: ManifestEntry) -> ManifestEntry: + ... 
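To make the inheritance rules above concrete, here is a small sketch. The IDs and lengths are illustrative, and it assumes that fields not passed to a Record default to null (or their initial defaults), as they do when read from Avro:

```python
from pyiceberg.manifest import (
    ManifestEntry,
    ManifestEntryStatus,
    ManifestFile,
    _inherit_sequence_number,
)

manifest = ManifestFile(
    manifest_path="s3://bucket/metadata/m0.avro",
    manifest_length=6669,
    partition_spec_id=0,
    added_snapshot_id=8638475580105682862,
    sequence_number=3,
    min_sequence_number=1,
)

# An ADDED entry written without explicit sequence numbers (nulls in Avro)
entry = ManifestEntry(status=ManifestEntryStatus.ADDED)
entry = _inherit_sequence_number(entry, manifest)

assert entry.snapshot_id == 8638475580105682862  # inherited from the manifest
assert entry.data_sequence_number == 3  # inherited because the entry is ADDED
assert entry.file_sequence_number == 3
```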
+ + def to_manifest_file(self) -> ManifestFile: + """Return the manifest file.""" + # once the manifest file is generated, no more entries can be added + self.closed = True + min_sequence_number = self._min_data_sequence_number or UNASSIGNED_SEQ + return ManifestFile( + manifest_path=self._output_file.location, + manifest_length=len(self._writer.output_file), + partition_spec_id=self._spec.spec_id, + content=self.content(), + sequence_number=UNASSIGNED_SEQ, + min_sequence_number=min_sequence_number, + added_snapshot_id=self._snapshot_id, + added_files_count=self._added_files, + existing_files_count=self._existing_files, + deleted_files_count=self._deleted_files, + added_rows_count=self._added_rows, + existing_rows_count=self._existing_rows, + deleted_rows_count=self._deleted_rows, + partitions=construct_partition_summaries(self._spec, self._schema, self._partitions), + key_metadata=None, + ) + + def add_entry(self, entry: ManifestEntry) -> ManifestWriter: + if self.closed: + raise RuntimeError("Cannot add entry to closed manifest writer") + if entry.status == ManifestEntryStatus.ADDED: + self._added_files += 1 + self._added_rows += entry.data_file.record_count + elif entry.status == ManifestEntryStatus.EXISTING: + self._existing_files += 1 + self._existing_rows += entry.data_file.record_count + elif entry.status == ManifestEntryStatus.DELETED: + self._deleted_files += 1 + self._deleted_rows += entry.data_file.record_count + + self._partitions.append(entry.data_file.partition) + + if ( + (entry.status == ManifestEntryStatus.ADDED or entry.status == ManifestEntryStatus.EXISTING) + and entry.data_sequence_number is not None + and (self._min_data_sequence_number is None or entry.data_sequence_number < self._min_data_sequence_number) + ): + self._min_data_sequence_number = entry.data_sequence_number + + self._writer.write_block([self.prepare_entry(entry)]) + return self + + +class ManifestWriterV1(ManifestWriter): + def __init__(self, spec: PartitionSpec, schema: Schema, output_file: OutputFile, snapshot_id: int): + super().__init__( + spec, + schema, + output_file, + snapshot_id, + { + "schema": schema.json(), + "partition-spec": spec.json(), + "partition-spec-id": str(spec.spec_id), + "format-version": "1", + }, + ) + + def content(self) -> ManifestContent: + return ManifestContent.DATA + + def new_writer(self) -> AvroOutputFile[ManifestEntry]: + v1_data_file_type = data_file_with_partition(self._spec.partition_type(self._schema), format_version=1) + v1_manifest_entry_schema = manifest_entry_schema_with_data_file(v1_data_file_type) + return AvroOutputFile[ManifestEntry](self._output_file, v1_manifest_entry_schema, "manifest_entry", self._meta) + + def prepare_entry(self, entry: ManifestEntry) -> ManifestEntry: + wrapped_entry = ManifestEntry(*entry.record_fields()) + wrapped_entry.data_file.block_size_in_bytes = DEFAULT_BLOCK_SIZE + return wrapped_entry + + +class ManifestWriterV2(ManifestWriter): + def __init__(self, spec: PartitionSpec, schema: Schema, output_file: OutputFile, snapshot_id: int): + super().__init__( + spec, + schema, + output_file, + snapshot_id, + { + "schema": schema.json(), + "partition-spec": spec.json(), + "partition-spec-id": str(spec.spec_id), + "format-version": "2", + "content": "data", + }, + ) + + def content(self) -> ManifestContent: + return ManifestContent.DATA + + def new_writer(self) -> AvroOutputFile[ManifestEntry]: + v2_data_file_type = data_file_with_partition(self._spec.partition_type(self._schema), format_version=2) + v2_manifest_entry_schema =
manifest_entry_schema_with_data_file(v2_data_file_type) + return AvroOutputFile[ManifestEntry](self._output_file, v2_manifest_entry_schema, "manifest_entry", self._meta) + + def prepare_entry(self, entry: ManifestEntry) -> ManifestEntry: + if entry.data_sequence_number is None: + if entry.snapshot_id is not None and entry.snapshot_id != self._snapshot_id: + raise ValueError(f"Found unassigned sequence number for an entry from snapshot: {entry.snapshot_id}") + if entry.status != ManifestEntryStatus.ADDED: + raise ValueError("Only entries with status ADDED can have null sequence number") + # In v2, we should not write block_size_in_bytes field + wrapped_data_file_v2_debug = DataFile( + format_version=2, + content=entry.data_file.content, + file_path=entry.data_file.file_path, + file_format=entry.data_file.file_format, + partition=entry.data_file.partition, + record_count=entry.data_file.record_count, + file_size_in_bytes=entry.data_file.file_size_in_bytes, + column_sizes=entry.data_file.column_sizes, + value_counts=entry.data_file.value_counts, + null_value_counts=entry.data_file.null_value_counts, + nan_value_counts=entry.data_file.nan_value_counts, + lower_bounds=entry.data_file.lower_bounds, + upper_bounds=entry.data_file.upper_bounds, + key_metadata=entry.data_file.key_metadata, + split_offsets=entry.data_file.split_offsets, + equality_ids=entry.data_file.equality_ids, + sort_order_id=entry.data_file.sort_order_id, + spec_id=entry.data_file.spec_id, + ) + wrapped_entry = ManifestEntry( + status=entry.status, + snapshot_id=entry.snapshot_id, + data_sequence_number=entry.data_sequence_number, + file_sequence_number=entry.file_sequence_number, + data_file=wrapped_data_file_v2_debug, + ) + return wrapped_entry + + +def write_manifest( + format_version: Literal[1, 2], spec: PartitionSpec, schema: Schema, output_file: OutputFile, snapshot_id: int +) -> ManifestWriter: + if format_version == 1: + return ManifestWriterV1(spec, schema, output_file, snapshot_id) + elif format_version == 2: + return ManifestWriterV2(spec, schema, output_file, snapshot_id) + else: + raise ValueError(f"Cannot write manifest for table version: {format_version}") + + +class ManifestListWriter(ABC): + _output_file: OutputFile + _meta: Dict[str, str] + _manifest_files: List[ManifestFile] + _commit_snapshot_id: int + _writer: AvroOutputFile[ManifestFile] + + def __init__(self, output_file: OutputFile, meta: Dict[str, str]): + self._output_file = output_file + self._meta = meta + self._manifest_files = [] + + def __enter__(self) -> ManifestListWriter: + """Open the writer for writing.""" + self._writer = AvroOutputFile[ManifestFile](self._output_file, MANIFEST_FILE_SCHEMA, "manifest_file", self._meta) + self._writer.__enter__() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + """Close the writer.""" + self._writer.__exit__(exc_type, exc_value, traceback) + return + + @abstractmethod + def prepare_manifest(self, manifest_file: ManifestFile) -> ManifestFile: + ... 
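A sketch of the intended write path for the write_manifest factory above; spec, schema, snapshot_id, and entries are assumed to exist in the caller's scope, and the output location is illustrative:

```python
from pyiceberg.io.pyarrow import PyArrowFileIO
from pyiceberg.manifest import write_manifest

io = PyArrowFileIO()
output_file = io.new_output("s3://bucket/metadata/manifest-0.avro")

# Counts, row totals, and partition summaries are accumulated per add_entry();
# to_manifest_file() is called after the writer has been closed.
with write_manifest(
    format_version=2,
    spec=spec,
    schema=schema,
    output_file=output_file,
    snapshot_id=snapshot_id,
) as writer:
    for entry in entries:
        writer.add_entry(entry)

manifest_file = writer.to_manifest_file()
```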
+ + def add_manifests(self, manifest_files: List[ManifestFile]) -> ManifestListWriter: + self._writer.write_block([self.prepare_manifest(manifest_file) for manifest_file in manifest_files]) + return self + + +class ManifestListWriterV1(ManifestListWriter): + def __init__(self, output_file: OutputFile, snapshot_id: int, parent_snapshot_id: int): + super().__init__( + output_file, {"snapshot-id": str(snapshot_id), "parent-snapshot-id": str(parent_snapshot_id), "format-version": "1"} + ) + + def prepare_manifest(self, manifest_file: ManifestFile) -> ManifestFile: + if manifest_file.content != ManifestContent.DATA: + raise ValidationError("Cannot store delete manifests in a v1 table") + return manifest_file + + +class ManifestListWriterV2(ManifestListWriter): + _commit_snapshot_id: int + _sequence_number: int + + def __init__(self, output_file: OutputFile, snapshot_id: int, parent_snapshot_id: int, sequence_number: int): + super().__init__( + output_file, + { + "snapshot-id": str(snapshot_id), + "parent-snapshot-id": str(parent_snapshot_id), + "sequence-number": str(sequence_number), + "format-version": "2", + }, + ) + self._commit_snapshot_id = snapshot_id + self._sequence_number = sequence_number + + def prepare_manifest(self, manifest_file: ManifestFile) -> ManifestFile: + wrapped_manifest_file = ManifestFile(*manifest_file.record_fields()) + + if wrapped_manifest_file.sequence_number == UNASSIGNED_SEQ: + # if the sequence number is being assigned here, then the manifest must be created by the current operation. + # To validate this, check that the snapshot id matches the current commit + if self._commit_snapshot_id != wrapped_manifest_file.added_snapshot_id: + raise ValueError( + f"Found unassigned sequence number for a manifest from snapshot: {wrapped_manifest_file.added_snapshot_id}" + ) + wrapped_manifest_file.sequence_number = self._sequence_number + + if wrapped_manifest_file.min_sequence_number == UNASSIGNED_SEQ: + if self._commit_snapshot_id != wrapped_manifest_file.added_snapshot_id: + raise ValueError( + f"Found unassigned sequence number for a manifest from snapshot: {wrapped_manifest_file.added_snapshot_id}" + ) + # if the min sequence number is not determined, then there was no assigned sequence number for any file + # written to the wrapped manifest. Replace the unassigned sequence number with the one for this commit + wrapped_manifest_file.min_sequence_number = self._sequence_number + return wrapped_manifest_file + + +def write_manifest_list( + format_version: Literal[1, 2], output_file: OutputFile, snapshot_id: int, parent_snapshot_id: int, sequence_number: int +) -> ManifestListWriter: + if format_version == 1: + return ManifestListWriterV1(output_file, snapshot_id, parent_snapshot_id) + elif format_version == 2: + return ManifestListWriterV2(output_file, snapshot_id, parent_snapshot_id, sequence_number) + else: + raise ValueError(f"Cannot write manifest list for table version: {format_version}") diff --git a/pyiceberg/partitioning.py b/pyiceberg/partitioning.py new file mode 100644 index 0000000000..f6307f0f8c --- /dev/null +++ b/pyiceberg/partitioning.py @@ -0,0 +1,217 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from __future__ import annotations + +from functools import cached_property +from typing import ( + Any, + Dict, + List, + Optional, + Tuple, +) + +from pydantic import ( + BeforeValidator, + Field, + PlainSerializer, + WithJsonSchema, +) +from typing_extensions import Annotated + +from pyiceberg.schema import Schema +from pyiceberg.transforms import Transform, parse_transform +from pyiceberg.typedef import IcebergBaseModel +from pyiceberg.types import NestedField, StructType + +INITIAL_PARTITION_SPEC_ID = 0 +PARTITION_FIELD_ID_START: int = 1000 + + +class PartitionField(IcebergBaseModel): + """PartitionField represents how one partition value is derived from the source column via transformation. + + Attributes: + source_id(int): The source column id of the table's schema. + field_id(int): The partition field id across all the table partition specs. + transform(Transform): The transform used to produce partition values from the source column. + name(str): The name of this partition field. + """ + + source_id: int = Field(alias="source-id") + field_id: int = Field(alias="field-id") + transform: Annotated[ # type: ignore + Transform, + BeforeValidator(parse_transform), + PlainSerializer(lambda c: str(c), return_type=str), # pylint: disable=W0108 + WithJsonSchema({"type": "string"}, mode="serialization"), + ] = Field() + name: str = Field() + + def __init__( + self, + source_id: Optional[int] = None, + field_id: Optional[int] = None, + transform: Optional[Transform[Any, Any]] = None, + name: Optional[str] = None, + **data: Any, + ): + if source_id is not None: + data["source-id"] = source_id + if field_id is not None: + data["field-id"] = field_id + if transform is not None: + data["transform"] = transform + if name is not None: + data["name"] = name + + super().__init__(**data) + + def __str__(self) -> str: + """Return the string representation of the PartitionField class.""" + return f"{self.field_id}: {self.name}: {self.transform}({self.source_id})" + + +class PartitionSpec(IcebergBaseModel): + """ + PartitionSpec captures the transformation from table data to partition values. + + Attributes: + spec_id(int): Any change to a PartitionSpec will produce a new spec_id. + fields(Tuple[PartitionField, ...]): The list of partition fields to produce partition values. + """ + + spec_id: int = Field(alias="spec-id", default=INITIAL_PARTITION_SPEC_ID) + fields: Tuple[PartitionField, ...] = Field(default_factory=tuple) + + def __init__( + self, + *fields: PartitionField, + **data: Any, + ): + if fields: + data["fields"] = tuple(fields) + super().__init__(**data) + + def __eq__(self, other: Any) -> bool: + """ + Return True if two objects are considered equal. + + Note: + Equality of PartitionSpec is determined by spec_id and partition fields only. + """ + if not isinstance(other, PartitionSpec): + return False + return self.spec_id == other.spec_id and self.fields == other.fields + + def __str__(self) -> str: + """ + Produce a human-readable string representation of PartitionSpec. + + Note: + Only include list of partition fields in the PartitionSpec's string representation.
+ """ + result_str = "[" + if self.fields: + result_str += "\n " + "\n ".join([str(field) for field in self.fields]) + "\n" + result_str += "]" + return result_str + + def __repr__(self) -> str: + """Return the string representation of the PartitionSpec class.""" + fields = f"{', '.join(repr(column) for column in self.fields)}, " if self.fields else "" + return f"PartitionSpec({fields}spec_id={self.spec_id})" + + def is_unpartitioned(self) -> bool: + return not self.fields + + @property + def last_assigned_field_id(self) -> int: + if self.fields: + return max(pf.field_id for pf in self.fields) + return PARTITION_FIELD_ID_START + + @cached_property + def source_id_to_fields_map(self) -> Dict[int, List[PartitionField]]: + source_id_to_fields_map: Dict[int, List[PartitionField]] = {} + for partition_field in self.fields: + existing = source_id_to_fields_map.get(partition_field.source_id, []) + existing.append(partition_field) + source_id_to_fields_map[partition_field.source_id] = existing + return source_id_to_fields_map + + def fields_by_source_id(self, field_id: int) -> List[PartitionField]: + return self.source_id_to_fields_map.get(field_id, []) + + def compatible_with(self, other: PartitionSpec) -> bool: + """Produce a boolean to return True if two PartitionSpec are considered compatible.""" + if self == other: + return True + if len(self.fields) != len(other.fields): + return False + return all( + this_field.source_id == that_field.source_id + and this_field.transform == that_field.transform + and this_field.name == that_field.name + for this_field, that_field in zip(self.fields, other.fields) + ) + + def partition_type(self, schema: Schema) -> StructType: + """Produce a struct of the PartitionSpec. + + The partition fields should be optional: + + - All partition transforms are required to produce null if the input value is null, so it can + happen when the source column is optional. + - Partition fields may be added later, in which case not all files would have the result field, + and it may be null. + + There is a case where we can guarantee that a partition field in the first and only partition spec + that uses a required source column will never be null, but it doesn't seem worth tracking this case. + + :param schema: The schema to bind to. + :return: A StructType that represents the PartitionSpec, with a NestedField for each PartitionField. 
+ """ + nested_fields = [] + for field in self.fields: + source_type = schema.find_type(field.source_id) + result_type = field.transform.result_type(source_type) + nested_fields.append(NestedField(field.field_id, field.name, result_type, required=False)) + return StructType(*nested_fields) + + +UNPARTITIONED_PARTITION_SPEC = PartitionSpec(spec_id=0) + + +def assign_fresh_partition_spec_ids(spec: PartitionSpec, old_schema: Schema, fresh_schema: Schema) -> PartitionSpec: + partition_fields = [] + for pos, field in enumerate(spec.fields): + original_column_name = old_schema.find_column_name(field.source_id) + if original_column_name is None: + raise ValueError(f"Could not find in old schema: {field}") + fresh_field = fresh_schema.find_field(original_column_name) + if fresh_field is None: + raise ValueError(f"Could not find field in fresh schema: {original_column_name}") + partition_fields.append( + PartitionField( + name=field.name, + source_id=fresh_field.field_id, + field_id=PARTITION_FIELD_ID_START + pos, + transform=field.transform, + ) + ) + return PartitionSpec(*partition_fields, spec_id=INITIAL_PARTITION_SPEC_ID) diff --git a/pyiceberg/py.typed b/pyiceberg/py.typed new file mode 100644 index 0000000000..fb618a1375 --- /dev/null +++ b/pyiceberg/py.typed @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Marker file for PEP 561 diff --git a/pyiceberg/schema.py b/pyiceberg/schema.py new file mode 100644 index 0000000000..28101809c7 --- /dev/null +++ b/pyiceberg/schema.py @@ -0,0 +1,1497 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=W0511 +from __future__ import annotations + +import itertools +from abc import ABC, abstractmethod +from dataclasses import dataclass +from functools import cached_property, partial, singledispatch +from typing import ( + Any, + Callable, + Dict, + Generic, + List, + Literal, + Optional, + Set, + Tuple, + TypeVar, + Union, +) + +from pydantic import Field, PrivateAttr, model_validator + +from pyiceberg.exceptions import ResolveError +from pyiceberg.typedef import EMPTY_DICT, IcebergBaseModel, StructProtocol +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IcebergType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + PrimitiveType, + StringType, + StructType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) + +T = TypeVar("T") +P = TypeVar("P") + +INITIAL_SCHEMA_ID = 0 + + +class Schema(IcebergBaseModel): + """A table Schema. + + Example: + >>> from pyiceberg import schema + >>> from pyiceberg import types + """ + + type: Literal["struct"] = "struct" + fields: Tuple[NestedField, ...] = Field(default_factory=tuple) + schema_id: int = Field(alias="schema-id", default=INITIAL_SCHEMA_ID) + identifier_field_ids: List[int] = Field(alias="identifier-field-ids", default_factory=list) + + _name_to_id: Dict[str, int] = PrivateAttr() + + def __init__(self, *fields: NestedField, **data: Any): + if fields: + data["fields"] = fields + super().__init__(**data) + self._name_to_id = index_by_name(self) + + def __str__(self) -> str: + """Return the string representation of the Schema class.""" + return "table {\n" + "\n".join([" " + str(field) for field in self.columns]) + "\n}" + + def __repr__(self) -> str: + """Return the string representation of the Schema class.""" + return f"Schema({', '.join(repr(column) for column in self.columns)}, schema_id={self.schema_id}, identifier_field_ids={self.identifier_field_ids})" + + def __len__(self) -> int: + """Return the length of an instance of the Literal class.""" + return len(self.fields) + + def __eq__(self, other: Any) -> bool: + """Return the equality of two instances of the Schema class.""" + if not other: + return False + + if not isinstance(other, Schema): + return False + + if len(self.columns) != len(other.columns): + return False + + identifier_field_ids_is_equal = self.identifier_field_ids == other.identifier_field_ids + schema_is_equal = all(lhs == rhs for lhs, rhs in zip(self.columns, other.columns)) + + return identifier_field_ids_is_equal and schema_is_equal + + @model_validator(mode="after") + def check_schema(self) -> Schema: + if self.identifier_field_ids: + for field_id in self.identifier_field_ids: + self._validate_identifier_field(field_id) + + return self + + @property + def columns(self) -> Tuple[NestedField, ...]: + """A tuple of the top-level fields.""" + return self.fields + + @cached_property + def _lazy_id_to_field(self) -> Dict[int, NestedField]: + """Return an index of field ID to NestedField instance. + + This is calculated once when called for the first time. Subsequent calls to this method will use a cached index. + """ + return index_by_id(self) + + @cached_property + def _lazy_id_to_parent(self) -> Dict[int, int]: + """Returns an index of field ID to parent field IDs. + + This is calculated once when called for the first time. Subsequent calls to this method will use a cached index. 
+ """ + return _index_parents(self) + + @cached_property + def _lazy_name_to_id_lower(self) -> Dict[str, int]: + """Return an index of lower-case field names to field IDs. + + This is calculated once when called for the first time. Subsequent calls to this method will use a cached index. + """ + return {name.lower(): field_id for name, field_id in self._name_to_id.items()} + + @cached_property + def _lazy_id_to_name(self) -> Dict[int, str]: + """Return an index of field ID to full name. + + This is calculated once when called for the first time. Subsequent calls to this method will use a cached index. + """ + return index_name_by_id(self) + + @cached_property + def _lazy_id_to_accessor(self) -> Dict[int, Accessor]: + """Return an index of field ID to accessor. + + This is calculated once when called for the first time. Subsequent calls to this method will use a cached index. + """ + return build_position_accessors(self) + + def as_struct(self) -> StructType: + """Return the schema as a struct.""" + return StructType(*self.fields) + + def find_field(self, name_or_id: Union[str, int], case_sensitive: bool = True) -> NestedField: + """Find a field using a field name or field ID. + + Args: + name_or_id (Union[str, int]): Either a field name or a field ID. + case_sensitive (bool, optional): Whether to perform a case-sensitive lookup using a field name. Defaults to True. + + Raises: + ValueError: When the value cannot be found. + + Returns: + NestedField: The matched NestedField. + """ + if isinstance(name_or_id, int): + if name_or_id not in self._lazy_id_to_field: + raise ValueError(f"Could not find field with id: {name_or_id}") + return self._lazy_id_to_field[name_or_id] + + if case_sensitive: + field_id = self._name_to_id.get(name_or_id) + else: + field_id = self._lazy_name_to_id_lower.get(name_or_id.lower()) + + if field_id is None: + raise ValueError(f"Could not find field with name {name_or_id}, case_sensitive={case_sensitive}") + + return self._lazy_id_to_field[field_id] + + def find_type(self, name_or_id: Union[str, int], case_sensitive: bool = True) -> IcebergType: + """Find a field type using a field name or field ID. + + Args: + name_or_id (Union[str, int]): Either a field name or a field ID. + case_sensitive (bool, optional): Whether to perform a case-sensitive lookup using a field name. Defaults to True. + + Returns: + NestedField: The type of the matched NestedField. + """ + field = self.find_field(name_or_id=name_or_id, case_sensitive=case_sensitive) + if not field: + raise ValueError(f"Could not find field with name or id {name_or_id}, case_sensitive={case_sensitive}") + return field.field_type + + @property + def highest_field_id(self) -> int: + return max(self._lazy_id_to_name.keys(), default=0) + + def find_column_name(self, column_id: int) -> Optional[str]: + """Find a column name given a column ID. + + Args: + column_id (int): The ID of the column. + + Returns: + str: The column name (or None if the column ID cannot be found). + """ + return self._lazy_id_to_name.get(column_id) + + @property + def column_names(self) -> List[str]: + """ + Return a list of all the column names, including nested fields. + + Excludes short names. + + Returns: + List[str]: The column names. + """ + return list(self._lazy_id_to_name.values()) + + def accessor_for_field(self, field_id: int) -> Accessor: + """Find a schema position accessor given a field ID. + + Args: + field_id (int): The ID of the field. + + Raises: + ValueError: When the value cannot be found. 
+ + Returns: + Accessor: An accessor for the given field ID. + """ + if field_id not in self._lazy_id_to_accessor: + raise ValueError(f"Could not find accessor for field with id: {field_id}") + + return self._lazy_id_to_accessor[field_id] + + def identifier_field_names(self) -> Set[str]: + """Return the names of the identifier fields. + + Returns: + Set of names of the identifier fields + """ + ids = set() + for field_id in self.identifier_field_ids: + column_name = self.find_column_name(field_id) + if column_name is None: + raise ValueError(f"Could not find identifier column id: {field_id}") + ids.add(column_name) + + return ids + + def select(self, *names: str, case_sensitive: bool = True) -> Schema: + """Return a new schema instance pruned to a subset of columns. + + Args: + names (List[str]): A list of column names. + case_sensitive (bool, optional): Whether to perform a case-sensitive lookup for each column name. Defaults to True. + + Returns: + Schema: A new schema with pruned columns. + + Raises: + ValueError: If a column is selected that doesn't exist. + """ + try: + if case_sensitive: + ids = {self._name_to_id[name] for name in names} + else: + ids = {self._lazy_name_to_id_lower[name.lower()] for name in names} + except KeyError as e: + raise ValueError(f"Could not find column: {e}") from e + + return prune_columns(self, ids) + + @property + def field_ids(self) -> Set[int]: + """Return the IDs of the current schema.""" + return set(self._name_to_id.values()) + + def _validate_identifier_field(self, field_id: int) -> None: + """Validate that the field with the given ID is a valid identifier field. + + Args: + field_id: The ID of the field to validate. + + Raises: + ValueError: If the field is not valid. + """ + field = self.find_field(field_id) + if not field.field_type.is_primitive: + raise ValueError(f"Identifier field {field_id} invalid: not a primitive type field") + + if not field.required: + raise ValueError(f"Identifier field {field_id} invalid: not a required field") + + if isinstance(field.field_type, (DoubleType, FloatType)): + raise ValueError(f"Identifier field {field_id} invalid: must not be float or double field") + + # Check whether the nested field is in a chain of required struct fields + # Exploring from root for better error message for list and map types + parent_id = self._lazy_id_to_parent.get(field.field_id) + fields: List[int] = [] + while parent_id is not None: + fields.append(parent_id) + parent_id = self._lazy_id_to_parent.get(parent_id) + + while fields: + parent = self.find_field(fields.pop()) + if not parent.field_type.is_struct: + raise ValueError(f"Cannot add field {field.name} as an identifier field: must not be nested in {parent}") + + if not parent.required: + raise ValueError( + f"Cannot add field {field.name} as an identifier field: must not be nested in an optional field {parent}" + ) + + +class SchemaVisitor(Generic[T], ABC): + def before_field(self, field: NestedField) -> None: + """Override this method to perform an action immediately before visiting a field.""" + + def after_field(self, field: NestedField) -> None: + """Override this method to perform an action immediately after visiting a field.""" + + def before_list_element(self, element: NestedField) -> None: + """Override this method to perform an action immediately before visiting an element within a ListType.""" + self.before_field(element) + + def after_list_element(self, element: NestedField) -> None: + """Override this method to perform an action immediately after visiting an 
element within a ListType.""" + self.after_field(element) + + def before_map_key(self, key: NestedField) -> None: + """Override this method to perform an action immediately before visiting a key within a MapType.""" + self.before_field(key) + + def after_map_key(self, key: NestedField) -> None: + """Override this method to perform an action immediately after visiting a key within a MapType.""" + self.after_field(key) + + def before_map_value(self, value: NestedField) -> None: + """Override this method to perform an action immediately before visiting a value within a MapType.""" + self.before_field(value) + + def after_map_value(self, value: NestedField) -> None: + """Override this method to perform an action immediately after visiting a value within a MapType.""" + self.after_field(value) + + @abstractmethod + def schema(self, schema: Schema, struct_result: T) -> T: + """Visit a Schema.""" + + @abstractmethod + def struct(self, struct: StructType, field_results: List[T]) -> T: + """Visit a StructType.""" + + @abstractmethod + def field(self, field: NestedField, field_result: T) -> T: + """Visit a NestedField.""" + + @abstractmethod + def list(self, list_type: ListType, element_result: T) -> T: + """Visit a ListType.""" + + @abstractmethod + def map(self, map_type: MapType, key_result: T, value_result: T) -> T: + """Visit a MapType.""" + + @abstractmethod + def primitive(self, primitive: PrimitiveType) -> T: + """Visit a PrimitiveType.""" + + +class PreOrderSchemaVisitor(Generic[T], ABC): + @abstractmethod + def schema(self, schema: Schema, struct_result: Callable[[], T]) -> T: + """Visit a Schema.""" + + @abstractmethod + def struct(self, struct: StructType, field_results: List[Callable[[], T]]) -> T: + """Visit a StructType.""" + + @abstractmethod + def field(self, field: NestedField, field_result: Callable[[], T]) -> T: + """Visit a NestedField.""" + + @abstractmethod + def list(self, list_type: ListType, element_result: Callable[[], T]) -> T: + """Visit a ListType.""" + + @abstractmethod + def map(self, map_type: MapType, key_result: Callable[[], T], value_result: Callable[[], T]) -> T: + """Visit a MapType.""" + + @abstractmethod + def primitive(self, primitive: PrimitiveType) -> T: + """Visit a PrimitiveType.""" + + +class SchemaWithPartnerVisitor(Generic[P, T], ABC): + def before_field(self, field: NestedField, field_partner: Optional[P]) -> None: + """Override this method to perform an action immediately before visiting a field.""" + + def after_field(self, field: NestedField, field_partner: Optional[P]) -> None: + """Override this method to perform an action immediately after visiting a field.""" + + def before_list_element(self, element: NestedField, element_partner: Optional[P]) -> None: + """Override this method to perform an action immediately before visiting an element within a ListType.""" + self.before_field(element, element_partner) + + def after_list_element(self, element: NestedField, element_partner: Optional[P]) -> None: + """Override this method to perform an action immediately after visiting an element within a ListType.""" + self.after_field(element, element_partner) + + def before_map_key(self, key: NestedField, key_partner: Optional[P]) -> None: + """Override this method to perform an action immediately before visiting a key within a MapType.""" + self.before_field(key, key_partner) + + def after_map_key(self, key: NestedField, key_partner: Optional[P]) -> None: + """Override this method to perform an action immediately after visiting a key within a MapType.""" + 
+        self.after_field(key, key_partner)
+
+    def before_map_value(self, value: NestedField, value_partner: Optional[P]) -> None:
+        """Override this method to perform an action immediately before visiting a value within a MapType."""
+        self.before_field(value, value_partner)
+
+    def after_map_value(self, value: NestedField, value_partner: Optional[P]) -> None:
+        """Override this method to perform an action immediately after visiting a value within a MapType."""
+        self.after_field(value, value_partner)
+
+    @abstractmethod
+    def schema(self, schema: Schema, schema_partner: Optional[P], struct_result: T) -> T:
+        """Visit a schema with a partner."""
+
+    @abstractmethod
+    def struct(self, struct: StructType, struct_partner: Optional[P], field_results: List[T]) -> T:
+        """Visit a struct type with a partner."""
+
+    @abstractmethod
+    def field(self, field: NestedField, field_partner: Optional[P], field_result: T) -> T:
+        """Visit a nested field with a partner."""
+
+    @abstractmethod
+    def list(self, list_type: ListType, list_partner: Optional[P], element_result: T) -> T:
+        """Visit a list type with a partner."""
+
+    @abstractmethod
+    def map(self, map_type: MapType, map_partner: Optional[P], key_result: T, value_result: T) -> T:
+        """Visit a map type with a partner."""
+
+    @abstractmethod
+    def primitive(self, primitive: PrimitiveType, primitive_partner: Optional[P]) -> T:
+        """Visit a primitive type with a partner."""
+
+
+class PrimitiveWithPartnerVisitor(SchemaWithPartnerVisitor[P, T]):
+    def primitive(self, primitive: PrimitiveType, primitive_partner: Optional[P]) -> T:
+        """Visit a PrimitiveType."""
+        if isinstance(primitive, BooleanType):
+            return self.visit_boolean(primitive, primitive_partner)
+        elif isinstance(primitive, IntegerType):
+            return self.visit_integer(primitive, primitive_partner)
+        elif isinstance(primitive, LongType):
+            return self.visit_long(primitive, primitive_partner)
+        elif isinstance(primitive, FloatType):
+            return self.visit_float(primitive, primitive_partner)
+        elif isinstance(primitive, DoubleType):
+            return self.visit_double(primitive, primitive_partner)
+        elif isinstance(primitive, DecimalType):
+            return self.visit_decimal(primitive, primitive_partner)
+        elif isinstance(primitive, DateType):
+            return self.visit_date(primitive, primitive_partner)
+        elif isinstance(primitive, TimeType):
+            return self.visit_time(primitive, primitive_partner)
+        elif isinstance(primitive, TimestampType):
+            return self.visit_timestamp(primitive, primitive_partner)
+        elif isinstance(primitive, TimestamptzType):
+            return self.visit_timestamptz(primitive, primitive_partner)
+        elif isinstance(primitive, StringType):
+            return self.visit_string(primitive, primitive_partner)
+        elif isinstance(primitive, UUIDType):
+            return self.visit_uuid(primitive, primitive_partner)
+        elif isinstance(primitive, FixedType):
+            return self.visit_fixed(primitive, primitive_partner)
+        elif isinstance(primitive, BinaryType):
+            return self.visit_binary(primitive, primitive_partner)
+        else:
+            raise ValueError(f"Unknown type: {primitive}")
+
+    @abstractmethod
+    def visit_boolean(self, boolean_type: BooleanType, partner: Optional[P]) -> T:
+        """Visit a BooleanType."""
+
+    @abstractmethod
+    def visit_integer(self, integer_type: IntegerType, partner: Optional[P]) -> T:
+        """Visit an IntegerType."""
+
+    @abstractmethod
+    def visit_long(self, long_type: LongType, partner: Optional[P]) -> T:
+        """Visit a LongType."""
+
+    @abstractmethod
+    def visit_float(self, float_type: FloatType, partner: Optional[P]) -> T:
+        """Visit a FloatType."""
+
+    @abstractmethod
+    def visit_double(self, double_type: DoubleType, partner: Optional[P]) -> T:
+        """Visit a DoubleType."""
+
+    @abstractmethod
+    def visit_decimal(self, decimal_type: DecimalType, partner: Optional[P]) -> T:
+        """Visit a DecimalType."""
+
+    @abstractmethod
+    def visit_date(self, date_type: DateType, partner: Optional[P]) -> T:
+        """Visit a DateType."""
+
+    @abstractmethod
+    def visit_time(self, time_type: TimeType, partner: Optional[P]) -> T:
+        """Visit a TimeType."""
+
+    @abstractmethod
+    def visit_timestamp(self, timestamp_type: TimestampType, partner: Optional[P]) -> T:
+        """Visit a TimestampType."""
+
+    @abstractmethod
+    def visit_timestamptz(self, timestamptz_type: TimestamptzType, partner: Optional[P]) -> T:
+        """Visit a TimestamptzType."""
+
+    @abstractmethod
+    def visit_string(self, string_type: StringType, partner: Optional[P]) -> T:
+        """Visit a StringType."""
+
+    @abstractmethod
+    def visit_uuid(self, uuid_type: UUIDType, partner: Optional[P]) -> T:
+        """Visit a UUIDType."""
+
+    @abstractmethod
+    def visit_fixed(self, fixed_type: FixedType, partner: Optional[P]) -> T:
+        """Visit a FixedType."""
+
+    @abstractmethod
+    def visit_binary(self, binary_type: BinaryType, partner: Optional[P]) -> T:
+        """Visit a BinaryType."""
+
+
+class PartnerAccessor(Generic[P], ABC):
+    @abstractmethod
+    def schema_partner(self, partner: Optional[P]) -> Optional[P]:
+        """Return the equivalent of the schema as a struct."""
+
+    @abstractmethod
+    def field_partner(self, partner_struct: Optional[P], field_id: int, field_name: str) -> Optional[P]:
+        """Return the equivalent struct field by name or id in the partner struct."""
+
+    @abstractmethod
+    def list_element_partner(self, partner_list: Optional[P]) -> Optional[P]:
+        """Return the equivalent list element in the partner list."""
+
+    @abstractmethod
+    def map_key_partner(self, partner_map: Optional[P]) -> Optional[P]:
+        """Return the equivalent map key in the partner map."""
+
+    @abstractmethod
+    def map_value_partner(self, partner_map: Optional[P]) -> Optional[P]:
+        """Return the equivalent map value in the partner map."""
+
+
+@singledispatch
+def visit_with_partner(
+    schema_or_type: Union[Schema, IcebergType], partner: P, visitor: SchemaWithPartnerVisitor[P, T], accessor: PartnerAccessor[P]
+) -> T:
+    raise ValueError(f"Unsupported type: {schema_or_type}")
+
+
+@visit_with_partner.register(Schema)
+def _(schema: Schema, partner: P, visitor: SchemaWithPartnerVisitor[P, T], accessor: PartnerAccessor[P]) -> T:
+    struct_partner = accessor.schema_partner(partner)
+    return visitor.schema(schema, partner, visit_with_partner(schema.as_struct(), struct_partner, visitor, accessor))  # type: ignore
+
+
+@visit_with_partner.register(StructType)
+def _(struct: StructType, partner: P, visitor: SchemaWithPartnerVisitor[P, T], accessor: PartnerAccessor[P]) -> T:
+    field_results = []
+    for field in struct.fields:
+        field_partner = accessor.field_partner(partner, field.field_id, field.name)
+        visitor.before_field(field, field_partner)
+        try:
+            field_result = visit_with_partner(field.field_type, field_partner, visitor, accessor)  # type: ignore
+            field_results.append(visitor.field(field, field_partner, field_result))
+        finally:
+            visitor.after_field(field, field_partner)
+
+    return visitor.struct(struct, partner, field_results)
+
+
+@visit_with_partner.register(ListType)
+def _(list_type: ListType, partner: P, visitor: SchemaWithPartnerVisitor[P, T], accessor: PartnerAccessor[P]) -> T:
+    element_partner = accessor.list_element_partner(partner)
+    visitor.before_list_element(list_type.element_field, element_partner)
+    try:
+        element_result = visit_with_partner(list_type.element_type, element_partner, visitor, accessor)  # type: ignore
+    finally:
+        visitor.after_list_element(list_type.element_field, element_partner)
+
+    return visitor.list(list_type, partner, element_result)
+
+
+@visit_with_partner.register(MapType)
+def _(map_type: MapType, partner: P, visitor: SchemaWithPartnerVisitor[P, T], accessor: PartnerAccessor[P]) -> T:
+    key_partner = accessor.map_key_partner(partner)
+    visitor.before_map_key(map_type.key_field, key_partner)
+    try:
+        key_result = visit_with_partner(map_type.key_type, key_partner, visitor, accessor)  # type: ignore
+    finally:
+        visitor.after_map_key(map_type.key_field, key_partner)
+
+    value_partner = accessor.map_value_partner(partner)
+    visitor.before_map_value(map_type.value_field, value_partner)
+    try:
+        value_result = visit_with_partner(map_type.value_type, value_partner, visitor, accessor)  # type: ignore
+    finally:
+        visitor.after_map_value(map_type.value_field, value_partner)
+    return visitor.map(map_type, partner, key_result, value_result)
+
+
+@visit_with_partner.register(PrimitiveType)
+def _(primitive: PrimitiveType, partner: P, visitor: SchemaWithPartnerVisitor[P, T], _: PartnerAccessor[P]) -> T:
+    return visitor.primitive(primitive, partner)
+
+
+class SchemaVisitorPerPrimitiveType(SchemaVisitor[T], ABC):
+    def primitive(self, primitive: PrimitiveType) -> T:
+        """Visit a PrimitiveType."""
+        if isinstance(primitive, FixedType):
+            return self.visit_fixed(primitive)
+        elif isinstance(primitive, DecimalType):
+            return self.visit_decimal(primitive)
+        elif isinstance(primitive, BooleanType):
+            return self.visit_boolean(primitive)
+        elif isinstance(primitive, IntegerType):
+            return self.visit_integer(primitive)
+        elif isinstance(primitive, LongType):
+            return self.visit_long(primitive)
+        elif isinstance(primitive, FloatType):
+            return self.visit_float(primitive)
+        elif isinstance(primitive, DoubleType):
+            return self.visit_double(primitive)
+        elif isinstance(primitive, DateType):
+            return self.visit_date(primitive)
+        elif isinstance(primitive, TimeType):
+            return self.visit_time(primitive)
+        elif isinstance(primitive, TimestampType):
+            return self.visit_timestamp(primitive)
+        elif isinstance(primitive, TimestamptzType):
+            return self.visit_timestamptz(primitive)
+        elif isinstance(primitive, StringType):
+            return self.visit_string(primitive)
+        elif isinstance(primitive, UUIDType):
+            return self.visit_uuid(primitive)
+        elif isinstance(primitive, BinaryType):
+            return self.visit_binary(primitive)
+        else:
+            raise ValueError(f"Unknown type: {primitive}")
+
+    @abstractmethod
+    def visit_fixed(self, fixed_type: FixedType) -> T:
+        """Visit a FixedType."""
+
+    @abstractmethod
+    def visit_decimal(self, decimal_type: DecimalType) -> T:
+        """Visit a DecimalType."""
+
+    @abstractmethod
+    def visit_boolean(self, boolean_type: BooleanType) -> T:
+        """Visit a BooleanType."""
+
+    @abstractmethod
+    def visit_integer(self, integer_type: IntegerType) -> T:
+        """Visit an IntegerType."""
+
+    @abstractmethod
+    def visit_long(self, long_type: LongType) -> T:
+        """Visit a LongType."""
+
+    @abstractmethod
+    def visit_float(self, float_type: FloatType) -> T:
+        """Visit a FloatType."""
+
+    @abstractmethod
+    def visit_double(self, double_type: DoubleType) -> T:
+        """Visit a DoubleType."""
+
+    @abstractmethod
+    def visit_date(self, date_type: DateType) -> T:
+        """Visit a DateType."""
+
+    @abstractmethod
+    def visit_time(self, time_type: TimeType) -> T:
+        """Visit a TimeType."""
+
+    @abstractmethod
+    def visit_timestamp(self, timestamp_type: TimestampType) -> T:
+        """Visit a TimestampType."""
+
+    @abstractmethod
+    def visit_timestamptz(self, timestamptz_type: TimestamptzType) -> T:
+        """Visit a TimestamptzType."""
+
+    @abstractmethod
+    def visit_string(self, string_type: StringType) -> T:
+        """Visit a StringType."""
+
+    @abstractmethod
+    def visit_uuid(self, uuid_type: UUIDType) -> T:
+        """Visit a UUIDType."""
+
+    @abstractmethod
+    def visit_binary(self, binary_type: BinaryType) -> T:
+        """Visit a BinaryType."""
+
+
+@dataclass(init=True, eq=True, frozen=True)
+class Accessor:
+    """An accessor for a specific position in a container that implements the StructProtocol."""
+
+    position: int
+    inner: Optional[Accessor] = None
+
+    def __str__(self) -> str:
+        """Return the string representation of the Accessor class."""
+        return f"Accessor(position={self.position},inner={self.inner})"
+
+    def __repr__(self) -> str:
+        """Return the string representation of the Accessor class."""
+        return self.__str__()
+
+    def get(self, container: StructProtocol) -> Any:
+        """Return the value at self.position in `container`.
+
+        Args:
+            container (StructProtocol): A container to access at position `self.position`.
+
+        Returns:
+            Any: The value at position `self.position` in the container.
+        """
+        pos = self.position
+        val = container[pos]
+        inner = self
+        while inner.inner:
+            inner = inner.inner
+            val = val[inner.position]
+
+        return val
+
+
+@singledispatch
+def visit(obj: Union[Schema, IcebergType], visitor: SchemaVisitor[T]) -> T:
+    """Apply a schema visitor to any point within a schema.
+
+    The function traverses the schema in post-order fashion.
+
+    Args:
+        obj (Union[Schema, IcebergType]): An instance of a Schema or an IcebergType.
+        visitor (SchemaVisitor[T]): An instance of an implementation of the generic SchemaVisitor base class.
+
+    Raises:
+        NotImplementedError: If attempting to visit an unrecognized object type.
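+
+    Example:
+        A minimal post-order visitor, included as an illustrative sketch only (the
+        visitor and schema below are made up for this docstring); it counts the
+        primitive types in a schema:
+
+        >>> from pyiceberg.schema import Schema, SchemaVisitor, visit
+        >>> from pyiceberg.types import IntegerType, NestedField, StringType
+        >>> class CountPrimitives(SchemaVisitor[int]):
+        ...     def schema(self, schema, struct_result): return struct_result
+        ...     def struct(self, struct, field_results): return sum(field_results)
+        ...     def field(self, field, field_result): return field_result
+        ...     def list(self, list_type, element_result): return element_result
+        ...     def map(self, map_type, key_result, value_result): return key_result + value_result
+        ...     def primitive(self, primitive): return 1
+        >>> schema = Schema(
+        ...     NestedField(field_id=1, name="id", field_type=IntegerType(), required=True),
+        ...     NestedField(field_id=2, name="data", field_type=StringType(), required=False),
+        ...     schema_id=1,
+        ... )
+        >>> visit(schema, CountPrimitives())
+        2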
+ """ + raise NotImplementedError("Cannot visit non-type: %s" % obj) + + +@visit.register(Schema) +def _(obj: Schema, visitor: SchemaVisitor[T]) -> T: + """Visit a Schema with a concrete SchemaVisitor.""" + return visitor.schema(obj, visit(obj.as_struct(), visitor)) + + +@visit.register(StructType) +def _(obj: StructType, visitor: SchemaVisitor[T]) -> T: + """Visit a StructType with a concrete SchemaVisitor.""" + results = [] + + for field in obj.fields: + visitor.before_field(field) + result = visit(field.field_type, visitor) + visitor.after_field(field) + results.append(visitor.field(field, result)) + + return visitor.struct(obj, results) + + +@visit.register(ListType) +def _(obj: ListType, visitor: SchemaVisitor[T]) -> T: + """Visit a ListType with a concrete SchemaVisitor.""" + visitor.before_list_element(obj.element_field) + result = visit(obj.element_type, visitor) + visitor.after_list_element(obj.element_field) + + return visitor.list(obj, result) + + +@visit.register(MapType) +def _(obj: MapType, visitor: SchemaVisitor[T]) -> T: + """Visit a MapType with a concrete SchemaVisitor.""" + visitor.before_map_key(obj.key_field) + key_result = visit(obj.key_type, visitor) + visitor.after_map_key(obj.key_field) + + visitor.before_map_value(obj.value_field) + value_result = visit(obj.value_type, visitor) + visitor.after_map_value(obj.value_field) + + return visitor.map(obj, key_result, value_result) + + +@visit.register(PrimitiveType) +def _(obj: PrimitiveType, visitor: SchemaVisitor[T]) -> T: + """Visit a PrimitiveType with a concrete SchemaVisitor.""" + return visitor.primitive(obj) + + +@singledispatch +def pre_order_visit(obj: Union[Schema, IcebergType], visitor: PreOrderSchemaVisitor[T]) -> T: + """Apply a schema visitor to any point within a schema. + + The function traverses the schema in pre-order fashion. This is a slimmed down version + compared to the post-order traversal (missing before and after methods), mostly + because we don't use the pre-order traversal much. + + Args: + obj (Union[Schema, IcebergType]): An instance of a Schema or an IcebergType. + visitor (PreOrderSchemaVisitor[T]): An instance of an implementation of the generic PreOrderSchemaVisitor base class. + + Raises: + NotImplementedError: If attempting to visit an unrecognized object type. 
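+
+    Example:
+        An illustrative sketch only (the visitor and schema below are made up for
+        this docstring). Because child results are passed as callables, a visitor
+        can return without ever descending into subtrees:
+
+        >>> from pyiceberg.schema import PreOrderSchemaVisitor, Schema, pre_order_visit
+        >>> from pyiceberg.types import IntegerType, NestedField
+        >>> class TopLevelFieldNames(PreOrderSchemaVisitor):
+        ...     def schema(self, schema, struct_result): return struct_result()
+        ...     def struct(self, struct, field_results): return [field.name for field in struct.fields]
+        ...     def field(self, field, field_result): return []
+        ...     def list(self, list_type, element_result): return []
+        ...     def map(self, map_type, key_result, value_result): return []
+        ...     def primitive(self, primitive): return []
+        >>> schema = Schema(NestedField(field_id=1, name="id", field_type=IntegerType(), required=True), schema_id=1)
+        >>> pre_order_visit(schema, TopLevelFieldNames())
+        ['id']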
+ """ + raise NotImplementedError("Cannot visit non-type: %s" % obj) + + +@pre_order_visit.register(Schema) +def _(obj: Schema, visitor: PreOrderSchemaVisitor[T]) -> T: + """Visit a Schema with a concrete PreOrderSchemaVisitor.""" + return visitor.schema(obj, lambda: pre_order_visit(obj.as_struct(), visitor)) + + +@pre_order_visit.register(StructType) +def _(obj: StructType, visitor: PreOrderSchemaVisitor[T]) -> T: + """Visit a StructType with a concrete PreOrderSchemaVisitor.""" + return visitor.struct( + obj, + [ + partial( + lambda field: visitor.field(field, partial(lambda field: pre_order_visit(field.field_type, visitor), field)), + field, + ) + for field in obj.fields + ], + ) + + +@pre_order_visit.register(ListType) +def _(obj: ListType, visitor: PreOrderSchemaVisitor[T]) -> T: + """Visit a ListType with a concrete PreOrderSchemaVisitor.""" + return visitor.list(obj, lambda: pre_order_visit(obj.element_type, visitor)) + + +@pre_order_visit.register(MapType) +def _(obj: MapType, visitor: PreOrderSchemaVisitor[T]) -> T: + """Visit a MapType with a concrete PreOrderSchemaVisitor.""" + return visitor.map(obj, lambda: pre_order_visit(obj.key_type, visitor), lambda: pre_order_visit(obj.value_type, visitor)) + + +@pre_order_visit.register(PrimitiveType) +def _(obj: PrimitiveType, visitor: PreOrderSchemaVisitor[T]) -> T: + """Visit a PrimitiveType with a concrete PreOrderSchemaVisitor.""" + return visitor.primitive(obj) + + +class _IndexById(SchemaVisitor[Dict[int, NestedField]]): + """A schema visitor for generating a field ID to NestedField index.""" + + def __init__(self) -> None: + self._index: Dict[int, NestedField] = {} + + def schema(self, schema: Schema, struct_result: Dict[int, NestedField]) -> Dict[int, NestedField]: + return self._index + + def struct(self, struct: StructType, field_results: List[Dict[int, NestedField]]) -> Dict[int, NestedField]: + return self._index + + def field(self, field: NestedField, field_result: Dict[int, NestedField]) -> Dict[int, NestedField]: + """Add the field ID to the index.""" + self._index[field.field_id] = field + return self._index + + def list(self, list_type: ListType, element_result: Dict[int, NestedField]) -> Dict[int, NestedField]: + """Add the list element ID to the index.""" + self._index[list_type.element_field.field_id] = list_type.element_field + return self._index + + def map( + self, map_type: MapType, key_result: Dict[int, NestedField], value_result: Dict[int, NestedField] + ) -> Dict[int, NestedField]: + """Add the key ID and value ID as individual items in the index.""" + self._index[map_type.key_field.field_id] = map_type.key_field + self._index[map_type.value_field.field_id] = map_type.value_field + return self._index + + def primitive(self, primitive: PrimitiveType) -> Dict[int, NestedField]: + return self._index + + +def index_by_id(schema_or_type: Union[Schema, IcebergType]) -> Dict[int, NestedField]: + """Generate an index of field IDs to NestedField instances. + + Args: + schema_or_type (Union[Schema, IcebergType]): A schema or type to index. + + Returns: + Dict[int, NestedField]: An index of field IDs to NestedField instances. 
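+
+    Example:
+        Illustrative only; the schema below is made up for this docstring:
+
+        >>> from pyiceberg.schema import Schema, index_by_id
+        >>> from pyiceberg.types import IntegerType, NestedField
+        >>> schema = Schema(NestedField(field_id=1, name="id", field_type=IntegerType(), required=True), schema_id=1)
+        >>> index_by_id(schema)[1].name
+        'id'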
+ """ + return visit(schema_or_type, _IndexById()) + + +class _IndexParents(SchemaVisitor[Dict[int, int]]): + def __init__(self) -> None: + self.id_to_parent: Dict[int, int] = {} + self.id_stack: List[int] = [] + + def before_field(self, field: NestedField) -> None: + self.id_stack.append(field.field_id) + + def after_field(self, field: NestedField) -> None: + self.id_stack.pop() + + def schema(self, schema: Schema, struct_result: Dict[int, int]) -> Dict[int, int]: + return self.id_to_parent + + def struct(self, struct: StructType, field_results: List[Dict[int, int]]) -> Dict[int, int]: + for field in struct.fields: + parent_id = self.id_stack[-1] if self.id_stack else None + if parent_id is not None: + # fields in the root struct are not added + self.id_to_parent[field.field_id] = parent_id + + return self.id_to_parent + + def field(self, field: NestedField, field_result: Dict[int, int]) -> Dict[int, int]: + return self.id_to_parent + + def list(self, list_type: ListType, element_result: Dict[int, int]) -> Dict[int, int]: + self.id_to_parent[list_type.element_id] = self.id_stack[-1] + return self.id_to_parent + + def map(self, map_type: MapType, key_result: Dict[int, int], value_result: Dict[int, int]) -> Dict[int, int]: + self.id_to_parent[map_type.key_id] = self.id_stack[-1] + self.id_to_parent[map_type.value_id] = self.id_stack[-1] + return self.id_to_parent + + def primitive(self, primitive: PrimitiveType) -> Dict[int, int]: + return self.id_to_parent + + +def _index_parents(schema_or_type: Union[Schema, IcebergType]) -> Dict[int, int]: + """Generate an index of field IDs to their parent field IDs. + + Args: + schema_or_type (Union[Schema, IcebergType]): A schema or type to index. + + Returns: + Dict[int, int]: An index of field IDs to their parent field IDs. 
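+
+    Example:
+        Illustrative only; the nested schema below is made up for this docstring.
+        Fields in the root struct have no parent and are not indexed:
+
+        >>> from pyiceberg.schema import Schema, _index_parents
+        >>> from pyiceberg.types import NestedField, StringType, StructType
+        >>> schema = Schema(
+        ...     NestedField(
+        ...         field_id=1,
+        ...         name="person",
+        ...         field_type=StructType(NestedField(field_id=2, name="name", field_type=StringType(), required=False)),
+        ...         required=True,
+        ...     ),
+        ...     schema_id=1,
+        ... )
+        >>> _index_parents(schema)
+        {2: 1}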
+ """ + return visit(schema_or_type, _IndexParents()) + + +class _IndexByName(SchemaVisitor[Dict[str, int]]): + """A schema visitor for generating a field name to field ID index.""" + + def __init__(self) -> None: + self._index: Dict[str, int] = {} + self._short_name_to_id: Dict[str, int] = {} + self._combined_index: Dict[str, int] = {} + self._field_names: List[str] = [] + self._short_field_names: List[str] = [] + + def before_map_value(self, value: NestedField) -> None: + if not isinstance(value.field_type, StructType): + self._short_field_names.append(value.name) + self._field_names.append(value.name) + + def after_map_value(self, value: NestedField) -> None: + if not isinstance(value.field_type, StructType): + self._short_field_names.pop() + self._field_names.pop() + + def before_list_element(self, element: NestedField) -> None: + """Short field names omit element when the element is a StructType.""" + if not isinstance(element.field_type, StructType): + self._short_field_names.append(element.name) + self._field_names.append(element.name) + + def after_list_element(self, element: NestedField) -> None: + if not isinstance(element.field_type, StructType): + self._short_field_names.pop() + self._field_names.pop() + + def before_field(self, field: NestedField) -> None: + """Store the field name.""" + self._field_names.append(field.name) + self._short_field_names.append(field.name) + + def after_field(self, field: NestedField) -> None: + """Remove the last field name stored.""" + self._field_names.pop() + self._short_field_names.pop() + + def schema(self, schema: Schema, struct_result: Dict[str, int]) -> Dict[str, int]: + return self._index + + def struct(self, struct: StructType, field_results: List[Dict[str, int]]) -> Dict[str, int]: + return self._index + + def field(self, field: NestedField, field_result: Dict[str, int]) -> Dict[str, int]: + """Add the field name to the index.""" + self._add_field(field.name, field.field_id) + return self._index + + def list(self, list_type: ListType, element_result: Dict[str, int]) -> Dict[str, int]: + """Add the list element name to the index.""" + self._add_field(list_type.element_field.name, list_type.element_field.field_id) + return self._index + + def map(self, map_type: MapType, key_result: Dict[str, int], value_result: Dict[str, int]) -> Dict[str, int]: + """Add the key name and value name as individual items in the index.""" + self._add_field(map_type.key_field.name, map_type.key_field.field_id) + self._add_field(map_type.value_field.name, map_type.value_field.field_id) + return self._index + + def _add_field(self, name: str, field_id: int) -> None: + """Add a field name to the index, mapping its full name to its field ID. + + Args: + name (str): The field name. + field_id (int): The field ID. + + Raises: + ValueError: If the field name is already contained in the index. + """ + full_name = name + + if self._field_names: + full_name = ".".join([".".join(self._field_names), name]) + + if full_name in self._index: + raise ValueError(f"Invalid schema, multiple fields for name {full_name}: {self._index[full_name]} and {field_id}") + self._index[full_name] = field_id + + if self._short_field_names: + short_name = ".".join([".".join(self._short_field_names), name]) + self._short_name_to_id[short_name] = field_id + + def primitive(self, primitive: PrimitiveType) -> Dict[str, int]: + return self._index + + def by_name(self) -> Dict[str, int]: + """Return an index of combined full and short names. 
+
+        Note: Only short names that do not conflict with full names are included.
+        """
+        combined_index = self._short_name_to_id.copy()
+        combined_index.update(self._index)
+        return combined_index
+
+    def by_id(self) -> Dict[int, str]:
+        """Return an index of ID to full names."""
+        id_to_full_name = {value: key for key, value in self._index.items()}
+        return id_to_full_name
+
+
+def index_by_name(schema_or_type: Union[Schema, IcebergType]) -> Dict[str, int]:
+    """Generate an index of field names to field IDs.
+
+    Args:
+        schema_or_type (Union[Schema, IcebergType]): A schema or type to index.
+
+    Returns:
+        Dict[str, int]: An index of field names to field IDs.
+    """
+    if len(schema_or_type.fields) > 0:
+        indexer = _IndexByName()
+        visit(schema_or_type, indexer)
+        return indexer.by_name()
+    else:
+        return EMPTY_DICT
+
+
+def index_name_by_id(schema_or_type: Union[Schema, IcebergType]) -> Dict[int, str]:
+    """Generate an index of field IDs to full field names.
+
+    Args:
+        schema_or_type (Union[Schema, IcebergType]): A schema or type to index.
+
+    Returns:
+        Dict[int, str]: An index of field IDs to full names.
+    """
+    indexer = _IndexByName()
+    visit(schema_or_type, indexer)
+    return indexer.by_id()
+
+
+Position = int
+
+
+class _BuildPositionAccessors(SchemaVisitor[Dict[Position, Accessor]]):
+    """A schema visitor for generating a field ID to accessor index.
+
+    Example:
+        >>> from pyiceberg.schema import Schema
+        >>> from pyiceberg.types import *
+        >>> schema = Schema(
+        ...     NestedField(field_id=2, name="id", field_type=IntegerType(), required=False),
+        ...     NestedField(field_id=1, name="data", field_type=StringType(), required=True),
+        ...     NestedField(
+        ...         field_id=3,
+        ...         name="location",
+        ...         field_type=StructType(
+        ...             NestedField(field_id=5, name="latitude", field_type=FloatType(), required=False),
+        ...             NestedField(field_id=6, name="longitude", field_type=FloatType(), required=False),
+        ...         ),
+        ...         required=True,
+        ...     ),
+        ...     schema_id=1,
+        ...     identifier_field_ids=[1],
+        ... )
+        >>> result = build_position_accessors(schema)
+        >>> expected = {
+        ...     2: Accessor(position=0, inner=None),
+        ...     1: Accessor(position=1, inner=None),
+        ...     5: Accessor(position=2, inner=Accessor(position=0, inner=None)),
+        ...     6: Accessor(position=2, inner=Accessor(position=1, inner=None))
+        ...
} + >>> result == expected + True + """ + + def schema(self, schema: Schema, struct_result: Dict[Position, Accessor]) -> Dict[Position, Accessor]: + return struct_result + + def struct(self, struct: StructType, field_results: List[Dict[Position, Accessor]]) -> Dict[Position, Accessor]: + result = {} + + for position, field in enumerate(struct.fields): + if field_results[position]: + for inner_field_id, acc in field_results[position].items(): + result[inner_field_id] = Accessor(position, inner=acc) + else: + result[field.field_id] = Accessor(position) + + return result + + def field(self, field: NestedField, field_result: Dict[Position, Accessor]) -> Dict[Position, Accessor]: + return field_result + + def list(self, list_type: ListType, element_result: Dict[Position, Accessor]) -> Dict[Position, Accessor]: + return {} + + def map( + self, map_type: MapType, key_result: Dict[Position, Accessor], value_result: Dict[Position, Accessor] + ) -> Dict[Position, Accessor]: + return {} + + def primitive(self, primitive: PrimitiveType) -> Dict[Position, Accessor]: + return {} + + +def build_position_accessors(schema_or_type: Union[Schema, IcebergType]) -> Dict[int, Accessor]: + """Generate an index of field IDs to schema position accessors. + + Args: + schema_or_type (Union[Schema, IcebergType]): A schema or type to index. + + Returns: + Dict[int, Accessor]: An index of field IDs to accessors. + """ + return visit(schema_or_type, _BuildPositionAccessors()) + + +def assign_fresh_schema_ids(schema_or_type: Union[Schema, IcebergType], next_id: Optional[Callable[[], int]] = None) -> Schema: + """Traverses the schema, and sets new IDs.""" + return pre_order_visit(schema_or_type, _SetFreshIDs(next_id_func=next_id)) + + +class _SetFreshIDs(PreOrderSchemaVisitor[IcebergType]): + """Traverses the schema and assigns monotonically increasing ids.""" + + reserved_ids: Dict[int, int] + + def __init__(self, next_id_func: Optional[Callable[[], int]] = None) -> None: + self.reserved_ids = {} + counter = itertools.count(1) + self.next_id_func = next_id_func if next_id_func is not None else lambda: next(counter) + + def _get_and_increment(self) -> int: + return self.next_id_func() + + def schema(self, schema: Schema, struct_result: Callable[[], StructType]) -> Schema: + # First we keep the original identifier_field_ids here, we remap afterwards + fields = struct_result().fields + return Schema(*fields, identifier_field_ids=[self.reserved_ids[field_id] for field_id in schema.identifier_field_ids]) + + def struct(self, struct: StructType, field_results: List[Callable[[], IcebergType]]) -> StructType: + # assign IDs for this struct's fields first + self.reserved_ids.update({field.field_id: self._get_and_increment() for field in struct.fields}) + return StructType(*[field() for field in field_results]) + + def field(self, field: NestedField, field_result: Callable[[], IcebergType]) -> IcebergType: + return NestedField( + field_id=self.reserved_ids[field.field_id], + name=field.name, + field_type=field_result(), + required=field.required, + doc=field.doc, + ) + + def list(self, list_type: ListType, element_result: Callable[[], IcebergType]) -> ListType: + self.reserved_ids[list_type.element_id] = self._get_and_increment() + return ListType( + element_id=self.reserved_ids[list_type.element_id], + element=element_result(), + element_required=list_type.element_required, + ) + + def map(self, map_type: MapType, key_result: Callable[[], IcebergType], value_result: Callable[[], IcebergType]) -> MapType: + 
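+        # Reserve fresh IDs for the key and value fields before materializing their
+        # types, so that parents always receive lower IDs than their children.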
+        self.reserved_ids[map_type.key_id] = self._get_and_increment()
+        self.reserved_ids[map_type.value_id] = self._get_and_increment()
+        return MapType(
+            key_id=self.reserved_ids[map_type.key_id],
+            key_type=key_result(),
+            value_id=self.reserved_ids[map_type.value_id],
+            value_type=value_result(),
+            value_required=map_type.value_required,
+        )
+
+    def primitive(self, primitive: PrimitiveType) -> PrimitiveType:
+        return primitive
+
+
+def prune_columns(schema: Schema, selected: Set[int], select_full_types: bool = True) -> Schema:
+    """Prune a schema to include only the selected field IDs.
+
+    Args:
+        schema: The schema to be pruned.
+        selected: The field IDs to be included.
+        select_full_types: Return the full type, rather than a pruned projection, when a subset of it is selected.
+
+    Returns:
+        The pruned schema.
+    """
+    result = visit(schema.as_struct(), _PruneColumnsVisitor(selected, select_full_types))
+    return Schema(
+        *(result or StructType()).fields,
+        schema_id=schema.schema_id,
+        identifier_field_ids=list(selected.intersection(schema.identifier_field_ids)),
+    )
+
+
+class _PruneColumnsVisitor(SchemaVisitor[Optional[IcebergType]]):
+    selected: Set[int]
+    select_full_types: bool
+
+    def __init__(self, selected: Set[int], select_full_types: bool):
+        self.selected = selected
+        self.select_full_types = select_full_types
+
+    def schema(self, schema: Schema, struct_result: Optional[IcebergType]) -> Optional[IcebergType]:
+        return struct_result
+
+    def struct(self, struct: StructType, field_results: List[Optional[IcebergType]]) -> Optional[IcebergType]:
+        fields = struct.fields
+        selected_fields = []
+        same_type = True
+
+        for idx, projected_type in enumerate(field_results):
+            field = fields[idx]
+            if field.field_type == projected_type:
+                selected_fields.append(field)
+            elif projected_type is not None:
+                same_type = False
+                # Type has changed, create a new field with the projected type
+                selected_fields.append(
+                    NestedField(
+                        field_id=field.field_id,
+                        name=field.name,
+                        field_type=projected_type,
+                        doc=field.doc,
+                        required=field.required,
+                    )
+                )
+
+        if selected_fields:
+            if len(selected_fields) == len(fields) and same_type:
+                # Nothing has changed, and we can return the original struct
+                return struct
+            else:
+                return StructType(*selected_fields)
+        return None
+
+    def field(self, field: NestedField, field_result: Optional[IcebergType]) -> Optional[IcebergType]:
+        if field.field_id in self.selected:
+            if self.select_full_types:
+                return field.field_type
+            elif field.field_type.is_struct:
+                return self._project_selected_struct(field_result)
+            else:
+                if not field.field_type.is_primitive:
+                    raise ValueError(
+                        f"Cannot explicitly project List or Map types, {field.field_id}:{field.name} of type {field.field_type} was selected"
+                    )
+                # Selected non-struct field
+                return field.field_type
+        elif field_result is not None:
+            # This field wasn't selected, but a subfield was, so include that
+            return field_result
+        else:
+            return None
+
+    def list(self, list_type: ListType, element_result: Optional[IcebergType]) -> Optional[IcebergType]:
+        if list_type.element_id in self.selected:
+            if self.select_full_types:
+                return list_type
+            elif list_type.element_type and list_type.element_type.is_struct:
+                projected_struct = self._project_selected_struct(element_result)
+                return self._project_list(list_type, projected_struct)
+            else:
+                if not list_type.element_type.is_primitive:
+                    raise ValueError(
+                        f"Cannot explicitly project List or Map types, {list_type.element_id} of type {list_type.element_type} was selected"
+                    )
+                return list_type
+
elif element_result is not None: + return self._project_list(list_type, element_result) + else: + return None + + def map( + self, map_type: MapType, key_result: Optional[IcebergType], value_result: Optional[IcebergType] + ) -> Optional[IcebergType]: + if map_type.value_id in self.selected: + if self.select_full_types: + return map_type + elif map_type.value_type and map_type.value_type.is_struct: + projected_struct = self._project_selected_struct(value_result) + return self._project_map(map_type, projected_struct) + if not map_type.value_type.is_primitive: + raise ValueError( + f"Cannot explicitly project List or Map types, Map value {map_type.value_id} of type {map_type.value_type} was selected" + ) + return map_type + elif value_result is not None: + return self._project_map(map_type, value_result) + elif map_type.key_id in self.selected: + return map_type + return None + + def primitive(self, primitive: PrimitiveType) -> Optional[IcebergType]: + return None + + @staticmethod + def _project_selected_struct(projected_field: Optional[IcebergType]) -> StructType: + if projected_field and not isinstance(projected_field, StructType): + raise ValueError("Expected a struct") + + if projected_field is None: + return StructType() + else: + return projected_field + + @staticmethod + def _project_list(list_type: ListType, element_result: IcebergType) -> ListType: + if list_type.element_type == element_result: + return list_type + else: + return ListType( + element_id=list_type.element_id, element_type=element_result, element_required=list_type.element_required + ) + + @staticmethod + def _project_map(map_type: MapType, value_result: IcebergType) -> MapType: + if map_type.value_type == value_result: + return map_type + else: + return MapType( + key_id=map_type.key_id, + value_id=map_type.value_id, + key_type=map_type.key_type, + value_type=value_result, + value_required=map_type.value_required, + ) + + +@singledispatch +def promote(file_type: IcebergType, read_type: IcebergType) -> IcebergType: + """Promotes reading a file type to a read type. + + Args: + file_type (IcebergType): The type of the Avro file. + read_type (IcebergType): The requested read type. + + Raises: + ResolveError: If attempting to resolve an unrecognized object type. 
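+
+    Example:
+        Illustrative only:
+
+        >>> from pyiceberg.schema import promote
+        >>> from pyiceberg.types import IntegerType, LongType
+        >>> promote(IntegerType(), LongType()) == LongType()
+        True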
+    """
+    if file_type == read_type:
+        return file_type
+    else:
+        raise ResolveError(f"Cannot promote {file_type} to {read_type}")
+
+
+@promote.register(IntegerType)
+def _(file_type: IntegerType, read_type: IcebergType) -> IcebergType:
+    if isinstance(read_type, LongType):
+        # Ints/Longs are binary compatible in Avro, so this is okay
+        return read_type
+    else:
+        raise ResolveError(f"Cannot promote an int to {read_type}")
+
+
+@promote.register(FloatType)
+def _(file_type: FloatType, read_type: IcebergType) -> IcebergType:
+    if isinstance(read_type, DoubleType):
+        # A double type is wider
+        return read_type
+    else:
+        raise ResolveError(f"Cannot promote a float to {read_type}")
+
+
+@promote.register(StringType)
+def _(file_type: StringType, read_type: IcebergType) -> IcebergType:
+    if isinstance(read_type, BinaryType):
+        return read_type
+    else:
+        raise ResolveError(f"Cannot promote a string to {read_type}")
+
+
+@promote.register(BinaryType)
+def _(file_type: BinaryType, read_type: IcebergType) -> IcebergType:
+    if isinstance(read_type, StringType):
+        return read_type
+    else:
+        raise ResolveError(f"Cannot promote a binary to {read_type}")
+
+
+@promote.register(DecimalType)
+def _(file_type: DecimalType, read_type: IcebergType) -> IcebergType:
+    if isinstance(read_type, DecimalType):
+        if file_type.precision <= read_type.precision and file_type.scale == read_type.scale:
+            return read_type
+        else:
+            raise ResolveError(f"Cannot reduce precision from {file_type} to {read_type}")
+    else:
+        raise ResolveError(f"Cannot promote a decimal to {read_type}")
+
+
+@promote.register(FixedType)
+def _(file_type: FixedType, read_type: IcebergType) -> IcebergType:
+    if isinstance(read_type, UUIDType) and len(file_type) == 16:
+        # Since pyarrow reads parquet UUID as fixed 16-byte binary, the promotion is needed to ensure read compatibility
+        return read_type
+    else:
+        raise ResolveError(f"Cannot promote {file_type} to {read_type}")
diff --git a/pyiceberg/serializers.py b/pyiceberg/serializers.py
new file mode 100644
index 0000000000..65d4c759bf
--- /dev/null
+++ b/pyiceberg/serializers.py
@@ -0,0 +1,131 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from __future__ import annotations
+
+import codecs
+import gzip
+from abc import ABC, abstractmethod
+from typing import Callable
+
+from pyiceberg.io import InputFile, InputStream, OutputFile
+from pyiceberg.table.metadata import TableMetadata, TableMetadataUtil
+
+GZIP = "gzip"
+
+
+class Compressor(ABC):
+    @staticmethod
+    def get_compressor(location: str) -> Compressor:
+        return GzipCompressor() if location.endswith(".gz.metadata.json") else NOOP_COMPRESSOR
+
+    @abstractmethod
+    def stream_decompressor(self, inp: InputStream) -> InputStream:
+        """Return a stream decompressor.
+
+        Args:
+            inp: The input stream that needs decompressing.
+
+        Returns:
+            The wrapped stream.
+        """
+
+    @abstractmethod
+    def bytes_compressor(self) -> Callable[[bytes], bytes]:
+        """Return a function to compress bytes.
+
+        Returns:
+            A function that can be used to compress bytes.
+        """
+
+
+class NoopCompressor(Compressor):
+    def stream_decompressor(self, inp: InputStream) -> InputStream:
+        return inp
+
+    def bytes_compressor(self) -> Callable[[bytes], bytes]:
+        return lambda b: b
+
+
+NOOP_COMPRESSOR = NoopCompressor()
+
+
+class GzipCompressor(Compressor):
+    def stream_decompressor(self, inp: InputStream) -> InputStream:
+        return gzip.open(inp)
+
+    def bytes_compressor(self) -> Callable[[bytes], bytes]:
+        return gzip.compress
+
+
+class FromByteStream:
+    """A collection of methods that deserialize dictionaries into Iceberg objects."""
+
+    @staticmethod
+    def table_metadata(
+        byte_stream: InputStream, encoding: str = "utf-8", compression: Compressor = NOOP_COMPRESSOR
+    ) -> TableMetadata:
+        """Instantiate a TableMetadata object from a byte stream.
+
+        Args:
+            byte_stream: A file-like byte stream object.
+            encoding (default "utf-8"): The byte encoding to use for the reader.
+            compression: Optional compression method.
+        """
+        with compression.stream_decompressor(byte_stream) as byte_stream:
+            reader = codecs.getreader(encoding)
+            json_bytes = reader(byte_stream)
+            metadata = json_bytes.read()
+
+        return TableMetadataUtil.parse_raw(metadata)
+
+
+class FromInputFile:
+    """A collection of methods that deserialize InputFiles into Iceberg objects."""
+
+    @staticmethod
+    def table_metadata(input_file: InputFile, encoding: str = "utf-8") -> TableMetadata:
+        """Create a TableMetadata instance from an input file.
+
+        Args:
+            input_file (InputFile): A custom implementation of the pyiceberg.io.InputFile abstract base class.
+            encoding (str): Encoding to use when loading bytestream.
+
+        Returns:
+            TableMetadata: A table metadata instance.
+
+        """
+        with input_file.open() as input_stream:
+            return FromByteStream.table_metadata(
+                byte_stream=input_stream, encoding=encoding, compression=Compressor.get_compressor(location=input_file.location)
+            )
+
+
+class ToOutputFile:
+    """A collection of methods that serialize Iceberg objects into files given an OutputFile instance."""
+
+    @staticmethod
+    def table_metadata(metadata: TableMetadata, output_file: OutputFile, overwrite: bool = False) -> None:
+        """Write a TableMetadata instance to an output file.
+
+        Args:
+            metadata (TableMetadata): The table metadata instance to write.
+            output_file (OutputFile): A custom implementation of the pyiceberg.io.OutputFile abstract base class.
+            overwrite (bool): Whether to overwrite the file if it already exists. Defaults to `False`.
+        """
+        with output_file.create(overwrite=overwrite) as output_stream:
+            json_bytes = metadata.model_dump_json().encode("utf-8")
+            json_bytes = Compressor.get_compressor(output_file.location).bytes_compressor()(json_bytes)
+            output_stream.write(json_bytes)
diff --git a/pyiceberg/table/__init__.py b/pyiceberg/table/__init__.py
new file mode 100644
index 0000000000..8443315a64
--- /dev/null
+++ b/pyiceberg/table/__init__.py
@@ -0,0 +1,1568 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from __future__ import annotations + +import itertools +from abc import ABC, abstractmethod +from copy import copy +from dataclasses import dataclass +from enum import Enum +from functools import cached_property +from itertools import chain +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Literal, + Optional, + Set, + Tuple, + TypeVar, + Union, +) + +from pydantic import Field, SerializeAsAny +from sortedcontainers import SortedList + +from pyiceberg.exceptions import ResolveError, ValidationError +from pyiceberg.expressions import ( + AlwaysTrue, + And, + BooleanExpression, + EqualTo, + parser, + visitors, +) +from pyiceberg.expressions.visitors import _InclusiveMetricsEvaluator, inclusive_projection +from pyiceberg.io import FileIO, load_file_io +from pyiceberg.manifest import ( + POSITIONAL_DELETE_SCHEMA, + DataFile, + DataFileContent, + ManifestContent, + ManifestEntry, + ManifestFile, +) +from pyiceberg.partitioning import PartitionSpec +from pyiceberg.schema import ( + Schema, + SchemaVisitor, + assign_fresh_schema_ids, + promote, + visit, +) +from pyiceberg.table.metadata import INITIAL_SEQUENCE_NUMBER, TableMetadata +from pyiceberg.table.snapshots import Snapshot, SnapshotLogEntry +from pyiceberg.table.sorting import SortOrder +from pyiceberg.typedef import ( + EMPTY_DICT, + IcebergBaseModel, + Identifier, + KeyDefaultDict, + Properties, +) +from pyiceberg.types import ( + IcebergType, + ListType, + MapType, + NestedField, + PrimitiveType, + StructType, +) +from pyiceberg.utils.concurrent import ExecutorFactory + +if TYPE_CHECKING: + import pandas as pd + import pyarrow as pa + import ray + from duckdb import DuckDBPyConnection + + from pyiceberg.catalog import Catalog + +ALWAYS_TRUE = AlwaysTrue() +TABLE_ROOT_ID = -1 + + +class Transaction: + _table: Table + _updates: Tuple[TableUpdate, ...] + _requirements: Tuple[TableRequirement, ...] + + def __init__( + self, + table: Table, + actions: Optional[Tuple[TableUpdate, ...]] = None, + requirements: Optional[Tuple[TableRequirement, ...]] = None, + ): + self._table = table + self._updates = actions or () + self._requirements = requirements or () + + def __enter__(self) -> Transaction: + """Start a transaction to update the table.""" + return self + + def __exit__(self, _: Any, value: Any, traceback: Any) -> None: + """Close and commit the transaction.""" + fresh_table = self.commit_transaction() + # Update the new data in place + self._table.metadata = fresh_table.metadata + self._table.metadata_location = fresh_table.metadata_location + + def _append_updates(self, *new_updates: TableUpdate) -> Transaction: + """Append updates to the set of staged updates. + + Args: + *new_updates: Any new updates. + + Raises: + ValueError: When the type of update is not unique. + + Returns: + Transaction object with the new updates appended. 
+        """
+        for new_update in new_updates:
+            type_new_update = type(new_update)
+            if any(type(update) == type_new_update for update in self._updates):
+                raise ValueError(f"Updates in a single commit need to be unique, duplicate: {type_new_update}")
+        self._updates = self._updates + new_updates
+        return self
+
+    def _append_requirements(self, *new_requirements: TableRequirement) -> Transaction:
+        """Append requirements to the set of staged requirements.
+
+        Args:
+            *new_requirements: Any new requirements.
+
+        Raises:
+            ValueError: When the type of requirement is not unique.
+
+        Returns:
+            Transaction object with the new requirements appended.
+        """
+        for new_requirement in new_requirements:
+            type_new_requirement = type(new_requirement)
+            if any(type(existing) == type_new_requirement for existing in self._requirements):
+                raise ValueError(f"Requirements in a single commit need to be unique, duplicate: {type_new_requirement}")
+        self._requirements = self._requirements + new_requirements
+        return self
+
+    def set_table_version(self, format_version: Literal[1, 2]) -> Transaction:
+        """Set the table to a certain version.
+
+        Args:
+            format_version: The newly set version.
+
+        Returns:
+            The alter table builder.
+        """
+        raise NotImplementedError("Not yet implemented")
+
+    def set_properties(self, **updates: str) -> Transaction:
+        """Set properties.
+
+        When a property is already set, it will be overwritten.
+
+        Args:
+            updates: The properties set on the table.
+
+        Returns:
+            The alter table builder.
+        """
+        return self._append_updates(SetPropertiesUpdate(updates=updates))
+
+    def update_schema(self) -> UpdateSchema:
+        """Create a new UpdateSchema to alter the columns of this table.
+
+        Returns:
+            A new UpdateSchema.
+        """
+        return UpdateSchema(self._table, self)
+
+    def remove_properties(self, *removals: str) -> Transaction:
+        """Remove properties.
+
+        Args:
+            removals: Properties to be removed.
+
+        Returns:
+            The alter table builder.
+        """
+        return self._append_updates(RemovePropertiesUpdate(removals=removals))
+
+    def update_location(self, location: str) -> Transaction:
+        """Set the new table location.
+
+        Args:
+            location: The new location of the table.
+
+        Returns:
+            The alter table builder.
+        """
+        raise NotImplementedError("Not yet implemented")
+
+    def commit_transaction(self) -> Table:
+        """Commit the changes to the catalog.
+
+        Returns:
+            The table with the updates applied.
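+
+        Example:
+            An illustrative sketch; `table` is assumed to be a Table loaded elsewhere:
+
+            >>> with table.transaction() as transaction:  # doctest: +SKIP
+            ...     transaction.set_properties(owner="example")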
+        """
+        if len(self._updates) > 0:
+            self._table._do_commit(  # pylint: disable=W0212
+                updates=self._updates,
+                requirements=self._requirements,
+            )
+            return self._table
+        else:
+            return self._table
+
+
+class TableUpdateAction(Enum):
+    upgrade_format_version = "upgrade-format-version"
+    add_schema = "add-schema"
+    set_current_schema = "set-current-schema"
+    add_spec = "add-spec"
+    set_default_spec = "set-default-spec"
+    add_sort_order = "add-sort-order"
+    set_default_sort_order = "set-default-sort-order"
+    add_snapshot = "add-snapshot"
+    set_snapshot_ref = "set-snapshot-ref"
+    remove_snapshots = "remove-snapshots"
+    remove_snapshot_ref = "remove-snapshot-ref"
+    set_location = "set-location"
+    set_properties = "set-properties"
+    remove_properties = "remove-properties"
+
+
+class TableUpdate(IcebergBaseModel):
+    action: TableUpdateAction
+
+
+class UpgradeFormatVersionUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.upgrade_format_version
+    format_version: int = Field(alias="format-version")
+
+
+class AddSchemaUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.add_schema
+    schema_: Schema = Field(alias="schema")
+    # This field is required: https://github.com/apache/iceberg/pull/7445
+    last_column_id: int = Field(alias="last-column-id")
+
+
+class SetCurrentSchemaUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.set_current_schema
+    schema_id: int = Field(
+        alias="schema-id", description="Schema ID to set as current, or -1 to set last added schema", default=-1
+    )
+
+
+class AddPartitionSpecUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.add_spec
+    spec: PartitionSpec
+
+
+class SetDefaultSpecUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.set_default_spec
+    spec_id: int = Field(
+        alias="spec-id", description="Partition spec ID to set as the default, or -1 to set last added spec", default=-1
+    )
+
+
+class AddSortOrderUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.add_sort_order
+    sort_order: SortOrder = Field(alias="sort-order")
+
+
+class SetDefaultSortOrderUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.set_default_sort_order
+    sort_order_id: int = Field(
+        alias="sort-order-id", description="Sort order ID to set as the default, or -1 to set last added sort order", default=-1
+    )
+
+
+class AddSnapshotUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.add_snapshot
+    snapshot: Snapshot
+
+
+class SetSnapshotRefUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.set_snapshot_ref
+    ref_name: str = Field(alias="ref-name")
+    type: Literal["tag", "branch"]
+    snapshot_id: int = Field(alias="snapshot-id")
+    max_ref_age_ms: int = Field(alias="max-ref-age-ms")
+    max_snapshot_age_ms: int = Field(alias="max-snapshot-age-ms")
+    min_snapshots_to_keep: int = Field(alias="min-snapshots-to-keep")
+
+
+class RemoveSnapshotsUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.remove_snapshots
+    snapshot_ids: List[int] = Field(alias="snapshot-ids")
+
+
+class RemoveSnapshotRefUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.remove_snapshot_ref
+    ref_name: str = Field(alias="ref-name")
+
+
+class SetLocationUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.set_location
+    location: str
+
+
+class SetPropertiesUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.set_properties
+    updates: Dict[str, str]
+
+
+class RemovePropertiesUpdate(TableUpdate):
+    action: TableUpdateAction = TableUpdateAction.remove_properties
+    removals: List[str]
+
+
+class TableRequirement(IcebergBaseModel):
+    type: str
+
+
+class AssertCreate(TableRequirement):
+    """The table must not already exist; used for create transactions."""
+
+    type: Literal["assert-create"] = Field(default="assert-create")
+
+
+class AssertTableUUID(TableRequirement):
+    """The table UUID must match the requirement's `uuid`."""
+
+    type: Literal["assert-table-uuid"] = Field(default="assert-table-uuid")
+    uuid: str
+
+
+class AssertRefSnapshotId(TableRequirement):
+    """The table branch or tag identified by the requirement's `ref` must reference the requirement's `snapshot-id`.
+
+    If `snapshot-id` is `null` or missing, the ref must not already exist.
+    """
+
+    type: Literal["assert-ref-snapshot-id"] = Field(default="assert-ref-snapshot-id")
+    ref: str
+    snapshot_id: int = Field(..., alias="snapshot-id")
+
+
+class AssertLastAssignedFieldId(TableRequirement):
+    """The table's last assigned column id must match the requirement's `last-assigned-field-id`."""
+
+    type: Literal["assert-last-assigned-field-id"] = Field(default="assert-last-assigned-field-id")
+    last_assigned_field_id: int = Field(..., alias="last-assigned-field-id")
+
+
+class AssertCurrentSchemaId(TableRequirement):
+    """The table's current schema id must match the requirement's `current-schema-id`."""
+
+    type: Literal["assert-current-schema-id"] = Field(default="assert-current-schema-id")
+    current_schema_id: int = Field(..., alias="current-schema-id")
+
+
+class AssertLastAssignedPartitionId(TableRequirement):
+    """The table's last assigned partition id must match the requirement's `last-assigned-partition-id`."""
+
+    type: Literal["assert-last-assigned-partition-id"] = Field(default="assert-last-assigned-partition-id")
+    last_assigned_partition_id: int = Field(..., alias="last-assigned-partition-id")
+
+
+class AssertDefaultSpecId(TableRequirement):
+    """The table's default spec id must match the requirement's `default-spec-id`."""
+
+    type: Literal["assert-default-spec-id"] = Field(default="assert-default-spec-id")
+    default_spec_id: int = Field(..., alias="default-spec-id")
+
+
+class AssertDefaultSortOrderId(TableRequirement):
+    """The table's default sort order id must match the requirement's `default-sort-order-id`."""
+
+    type: Literal["assert-default-sort-order-id"] = Field(default="assert-default-sort-order-id")
+    default_sort_order_id: int = Field(..., alias="default-sort-order-id")
+
+
+class CommitTableRequest(IcebergBaseModel):
+    identifier: Identifier = Field()
+    requirements: Tuple[SerializeAsAny[TableRequirement], ...] = Field(default_factory=tuple)
+    updates: Tuple[SerializeAsAny[TableUpdate], ...] = Field(default_factory=tuple)
+
+
+class CommitTableResponse(IcebergBaseModel):
+    metadata: TableMetadata
+    metadata_location: str = Field(alias="metadata-location")
+
+
+class Table:
+    identifier: Identifier = Field()
+    metadata: TableMetadata
+    metadata_location: str = Field()
+    io: FileIO
+    catalog: Catalog
+
+    def __init__(
+        self, identifier: Identifier, metadata: TableMetadata, metadata_location: str, io: FileIO, catalog: Catalog
+    ) -> None:
+        self.identifier = identifier
+        self.metadata = metadata
+        self.metadata_location = metadata_location
+        self.io = io
+        self.catalog = catalog
+
+    def transaction(self) -> Transaction:
+        return Transaction(self)
+
+    def refresh(self) -> Table:
+        """Refresh the current table metadata."""
+        fresh = self.catalog.load_table(self.identifier[1:])
+        self.metadata = fresh.metadata
+        self.io = fresh.io
+        self.metadata_location = fresh.metadata_location
+        return self
+
+    def name(self) -> Identifier:
+        """Return the identifier of this table."""
+        return self.identifier
+
+    def scan(
+        self,
+        row_filter: Union[str, BooleanExpression] = ALWAYS_TRUE,
+        selected_fields: Tuple[str, ...] = ("*",),
+        case_sensitive: bool = True,
+        snapshot_id: Optional[int] = None,
+        options: Properties = EMPTY_DICT,
+        limit: Optional[int] = None,
+    ) -> DataScan:
+        return DataScan(
+            table=self,
+            row_filter=row_filter,
+            selected_fields=selected_fields,
+            case_sensitive=case_sensitive,
+            snapshot_id=snapshot_id,
+            options=options,
+            limit=limit,
+        )
+
+    def schema(self) -> Schema:
+        """Return the schema for this table."""
+        return next(schema for schema in self.metadata.schemas if schema.schema_id == self.metadata.current_schema_id)
+
+    def schemas(self) -> Dict[int, Schema]:
+        """Return a dict of the schemas of this table."""
+        return {schema.schema_id: schema for schema in self.metadata.schemas}
+
+    def spec(self) -> PartitionSpec:
+        """Return the partition spec of this table."""
+        return next(spec for spec in self.metadata.partition_specs if spec.spec_id == self.metadata.default_spec_id)
+
+    def specs(self) -> Dict[int, PartitionSpec]:
+        """Return a dict of the partition specs of this table."""
+        return {spec.spec_id: spec for spec in self.metadata.partition_specs}
+
+    def sort_order(self) -> SortOrder:
+        """Return the sort order of this table."""
+        return next(
+            sort_order for sort_order in self.metadata.sort_orders if sort_order.order_id == self.metadata.default_sort_order_id
+        )
+
+    def sort_orders(self) -> Dict[int, SortOrder]:
+        """Return a dict of the sort orders of this table."""
+        return {sort_order.order_id: sort_order for sort_order in self.metadata.sort_orders}
+
+    @property
+    def properties(self) -> Dict[str, str]:
+        """Properties of the table."""
+        return self.metadata.properties
+
+    def location(self) -> str:
+        """Return the table's base location."""
+        return self.metadata.location
+
+    def current_snapshot(self) -> Optional[Snapshot]:
+        """Get the current snapshot for this table, or None if there is no current snapshot."""
+        if snapshot_id := self.metadata.current_snapshot_id:
+            return self.snapshot_by_id(snapshot_id)
+        return None
+
+    def snapshot_by_id(self, snapshot_id: int) -> Optional[Snapshot]:
+        """Get the snapshot of this table with the given id, or None if there is no matching snapshot."""
+        try:
+            return next(snapshot for snapshot in self.metadata.snapshots if snapshot.snapshot_id == snapshot_id)
+        except StopIteration:
+            return None
+
+    def snapshot_by_name(self, name: str) -> Optional[Snapshot]:
+        """Return the snapshot referenced by the given name, or None if no such reference exists."""
+        if ref := self.metadata.refs.get(name):
+            return self.snapshot_by_id(ref.snapshot_id)
+        return None
+
+    def history(self) -> List[SnapshotLogEntry]:
+        """Get the snapshot history of this table."""
+        return self.metadata.snapshot_log
+
+    def update_schema(self, allow_incompatible_changes: bool = False, case_sensitive: bool = True) -> UpdateSchema:
+        return UpdateSchema(self, allow_incompatible_changes=allow_incompatible_changes, case_sensitive=case_sensitive)
+
+    def _do_commit(self, updates: Tuple[TableUpdate, ...], requirements: Tuple[TableRequirement, ...]) -> None:
+        response = self.catalog._commit_table(  # pylint: disable=W0212
+            CommitTableRequest(identifier=self.identifier[1:], updates=updates, requirements=requirements)
+        )  # pylint: disable=W0212
+        self.metadata = response.metadata
+        self.metadata_location = response.metadata_location
+
+    def __eq__(self, other: Any) -> bool:
+        """Return the equality of two instances of the Table class."""
+        return (
+            self.identifier == other.identifier
+            and self.metadata == other.metadata
+            and self.metadata_location == other.metadata_location
+            if isinstance(other, Table)
+            else False
+        )
+
+    def __repr__(self) -> str:
+        """Return the string representation of the Table class."""
+        table_name = self.catalog.table_name_from(self.identifier)
+        schema_str = ",\n  ".join(str(column) for column in self.schema().columns if self.schema())
+        partition_str = f"partition by: [{', '.join(field.name for field in self.spec().fields if self.spec())}]"
+        sort_order_str = f"sort order: [{', '.join(str(field) for field in self.sort_order().fields if self.sort_order())}]"
+        snapshot_str = f"snapshot: {str(self.current_snapshot()) if self.current_snapshot() else 'null'}"
+        result_str = f"{table_name}(\n  {schema_str}\n),\n{partition_str},\n{sort_order_str},\n{snapshot_str}"
+        return result_str
+
+
+class StaticTable(Table):
+    """Load a table directly from a metadata file (i.e., without using a catalog)."""
+
+    def refresh(self) -> Table:
+        """Refresh the current table metadata."""
+        raise NotImplementedError("To be implemented")
+
+    @classmethod
+    def from_metadata(cls, metadata_location: str, properties: Properties = EMPTY_DICT) -> StaticTable:
+        io = load_file_io(properties=properties, location=metadata_location)
+        file = io.new_input(metadata_location)
+
+        from pyiceberg.serializers import FromInputFile
+
+        metadata = FromInputFile.table_metadata(file)
+
+        from pyiceberg.catalog.noop import NoopCatalog
+
+        return cls(
+            identifier=("static-table", metadata_location),
+            metadata_location=metadata_location,
+            metadata=metadata,
+            io=load_file_io({**properties, **metadata.properties}),
+            catalog=NoopCatalog("static-table"),
+        )
+
+
+def _parse_row_filter(expr: Union[str, BooleanExpression]) -> BooleanExpression:
+    """Accept an expression in the form of a BooleanExpression or a string.
+
+    In the case of a string, it will be converted into an unbound BooleanExpression.
+
+    Args:
+        expr: Expression as a BooleanExpression or a string.
+
+    Returns: An unbound BooleanExpression.
+    """
+    return parser.parse(expr) if isinstance(expr, str) else expr
+
+
+S = TypeVar("S", bound="TableScan", covariant=True)
+
+
+class TableScan(ABC):
+    table: Table
+    row_filter: BooleanExpression
+    selected_fields: Tuple[str, ...]
+ case_sensitive: bool + snapshot_id: Optional[int] + options: Properties + limit: Optional[int] + + def __init__( + self, + table: Table, + row_filter: Union[str, BooleanExpression] = ALWAYS_TRUE, + selected_fields: Tuple[str, ...] = ("*",), + case_sensitive: bool = True, + snapshot_id: Optional[int] = None, + options: Properties = EMPTY_DICT, + limit: Optional[int] = None, + ): + self.table = table + self.row_filter = _parse_row_filter(row_filter) + self.selected_fields = selected_fields + self.case_sensitive = case_sensitive + self.snapshot_id = snapshot_id + self.options = options + self.limit = limit + + def snapshot(self) -> Optional[Snapshot]: + if self.snapshot_id: + return self.table.snapshot_by_id(self.snapshot_id) + return self.table.current_snapshot() + + def projection(self) -> Schema: + snapshot_schema = self.table.schema() + if snapshot := self.snapshot(): + if snapshot_schema_id := snapshot.schema_id: + snapshot_schema = self.table.schemas()[snapshot_schema_id] + + if "*" in self.selected_fields: + return snapshot_schema + + return snapshot_schema.select(*self.selected_fields, case_sensitive=self.case_sensitive) + + @abstractmethod + def plan_files(self) -> Iterable[ScanTask]: + ... + + @abstractmethod + def to_arrow(self) -> pa.Table: + ... + + @abstractmethod + def to_pandas(self, **kwargs: Any) -> pd.DataFrame: + ... + + def update(self: S, **overrides: Any) -> S: + """Create a copy of this table scan with updated fields.""" + return type(self)(**{**self.__dict__, **overrides}) + + def use_ref(self: S, name: str) -> S: + if self.snapshot_id: + raise ValueError(f"Cannot override ref, already set snapshot id={self.snapshot_id}") + if snapshot := self.table.snapshot_by_name(name): + return self.update(snapshot_id=snapshot.snapshot_id) + + raise ValueError(f"Cannot scan unknown ref={name}") + + def select(self: S, *field_names: str) -> S: + if "*" in self.selected_fields: + return self.update(selected_fields=field_names) + return self.update(selected_fields=tuple(set(self.selected_fields).intersection(set(field_names)))) + + def filter(self: S, expr: Union[str, BooleanExpression]) -> S: + return self.update(row_filter=And(self.row_filter, _parse_row_filter(expr))) + + def with_case_sensitive(self: S, case_sensitive: bool = True) -> S: + return self.update(case_sensitive=case_sensitive) + + +class ScanTask(ABC): + pass + + +@dataclass(init=False) +class FileScanTask(ScanTask): + file: DataFile + delete_files: Set[DataFile] + start: int + length: int + + def __init__( + self, + data_file: DataFile, + delete_files: Optional[Set[DataFile]] = None, + start: Optional[int] = None, + length: Optional[int] = None, + ) -> None: + self.file = data_file + self.delete_files = delete_files or set() + self.start = start or 0 + self.length = length or data_file.file_size_in_bytes + + +def _open_manifest( + io: FileIO, + manifest: ManifestFile, + partition_filter: Callable[[DataFile], bool], + metrics_evaluator: Callable[[DataFile], bool], +) -> List[ManifestEntry]: + return [ + manifest_entry + for manifest_entry in manifest.fetch_manifest_entry(io, discard_deleted=True) + if partition_filter(manifest_entry.data_file) and metrics_evaluator(manifest_entry.data_file) + ] + + +def _min_data_file_sequence_number(manifests: List[ManifestFile]) -> int: + try: + return min( + manifest.min_sequence_number or INITIAL_SEQUENCE_NUMBER + for manifest in manifests + if manifest.content == ManifestContent.DATA + ) + except ValueError: + # In case of an empty iterator + return INITIAL_SEQUENCE_NUMBER + + 
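
The TableScan builder methods above never mutate a scan in place: each call goes
through update() and returns a fresh copy with one field overridden. A short sketch
of the resulting chaining style (the table and column names are hypothetical):

    # Each builder call returns a new scan, leaving the original untouched.
    scan = (
        table.scan(row_filter="id > 100", selected_fields=("id", "name"))
        .with_case_sensitive(False)
        .filter("id < 1000")  # ANDed onto the existing row filter
    )
    df = scan.to_pandas()  # plan files, read them into Arrow, convert to pandas
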
+def _match_deletes_to_datafile(data_entry: ManifestEntry, positional_delete_entries: SortedList[ManifestEntry]) -> Set[DataFile]:
+    """Check which delete files are relevant for the data file.
+
+    The column metrics are used to check whether the file path of the data file
+    falls within the lower and upper bounds of the candidate delete files.
+
+    Args:
+        data_entry (ManifestEntry): The manifest entry of the data file.
+        positional_delete_entries (SortedList[ManifestEntry]): All the candidate positional delete manifest entries.
+
+    Returns:
+        A set of delete files that are relevant for the data file.
+    """
+    relevant_entries = positional_delete_entries[positional_delete_entries.bisect_right(data_entry) :]
+
+    if len(relevant_entries) > 0:
+        evaluator = _InclusiveMetricsEvaluator(POSITIONAL_DELETE_SCHEMA, EqualTo("file_path", data_entry.data_file.file_path))
+        return {
+            positional_delete_entry.data_file
+            for positional_delete_entry in relevant_entries
+            if evaluator.eval(positional_delete_entry.data_file)
+        }
+    else:
+        return set()
+
+
+class DataScan(TableScan):
+    def __init__(
+        self,
+        table: Table,
+        row_filter: Union[str, BooleanExpression] = ALWAYS_TRUE,
+        selected_fields: Tuple[str, ...] = ("*",),
+        case_sensitive: bool = True,
+        snapshot_id: Optional[int] = None,
+        options: Properties = EMPTY_DICT,
+        limit: Optional[int] = None,
+    ):
+        super().__init__(table, row_filter, selected_fields, case_sensitive, snapshot_id, options, limit)
+
+    def _build_partition_projection(self, spec_id: int) -> BooleanExpression:
+        project = inclusive_projection(self.table.schema(), self.table.specs()[spec_id])
+        return project(self.row_filter)
+
+    @cached_property
+    def partition_filters(self) -> KeyDefaultDict[int, BooleanExpression]:
+        return KeyDefaultDict(self._build_partition_projection)
+
+    def _build_manifest_evaluator(self, spec_id: int) -> Callable[[ManifestFile], bool]:
+        spec = self.table.specs()[spec_id]
+        return visitors.manifest_evaluator(spec, self.table.schema(), self.partition_filters[spec_id], self.case_sensitive)
+
+    def _build_partition_evaluator(self, spec_id: int) -> Callable[[DataFile], bool]:
+        spec = self.table.specs()[spec_id]
+        partition_type = spec.partition_type(self.table.schema())
+        partition_schema = Schema(*partition_type.fields)
+        partition_expr = self.partition_filters[spec_id]
+
+        evaluator = visitors.expression_evaluator(partition_schema, partition_expr, self.case_sensitive)
+        return lambda data_file: evaluator(data_file.partition)
+
+    def _check_sequence_number(self, min_data_sequence_number: int, manifest: ManifestFile) -> bool:
+        """Ensure that no manifests are loaded that contain deletes that are older than the data.
+
+        Args:
+            min_data_sequence_number (int): The minimal data sequence number.
+            manifest (ManifestFile): A ManifestFile that can be either data or deletes.
+
+        Returns:
+            Boolean indicating if it is either a data file, or a relevant delete file.
+        """
+        return manifest.content == ManifestContent.DATA or (
+            # Not interested in deletes that are older than the data
+            manifest.content == ManifestContent.DELETES
+            and (manifest.sequence_number or INITIAL_SEQUENCE_NUMBER) >= min_data_sequence_number
+        )
+
+    def plan_files(self) -> Iterable[FileScanTask]:
+        """Plan the relevant files by filtering on the PartitionSpecs.
+
+        Returns:
+            List of FileScanTasks that contain both data and delete files.
+ """ + snapshot = self.snapshot() + if not snapshot: + return iter([]) + + io = self.table.io + + # step 1: filter manifests using partition summaries + # the filter depends on the partition spec used to write the manifest file, so create a cache of filters for each spec id + + manifest_evaluators: Dict[int, Callable[[ManifestFile], bool]] = KeyDefaultDict(self._build_manifest_evaluator) + + manifests = [ + manifest_file + for manifest_file in snapshot.manifests(io) + if manifest_evaluators[manifest_file.partition_spec_id](manifest_file) + ] + + # step 2: filter the data files in each manifest + # this filter depends on the partition spec used to write the manifest file + + partition_evaluators: Dict[int, Callable[[DataFile], bool]] = KeyDefaultDict(self._build_partition_evaluator) + metrics_evaluator = _InclusiveMetricsEvaluator( + self.table.schema(), self.row_filter, self.case_sensitive, self.options.get("include_empty_files") == "true" + ).eval + + min_data_sequence_number = _min_data_file_sequence_number(manifests) + + data_entries: List[ManifestEntry] = [] + positional_delete_entries = SortedList(key=lambda entry: entry.data_sequence_number or INITIAL_SEQUENCE_NUMBER) + + executor = ExecutorFactory.get_or_create() + for manifest_entry in chain( + *executor.map( + lambda args: _open_manifest(*args), + [ + ( + io, + manifest, + partition_evaluators[manifest.partition_spec_id], + metrics_evaluator, + ) + for manifest in manifests + if self._check_sequence_number(min_data_sequence_number, manifest) + ], + ) + ): + data_file = manifest_entry.data_file + if data_file.content == DataFileContent.DATA: + data_entries.append(manifest_entry) + elif data_file.content == DataFileContent.POSITION_DELETES: + positional_delete_entries.add(manifest_entry) + elif data_file.content == DataFileContent.EQUALITY_DELETES: + raise ValueError("PyIceberg does not yet support equality deletes: https://github.com/apache/iceberg/issues/6568") + else: + raise ValueError(f"Unknown DataFileContent ({data_file.content}): {manifest_entry}") + + return [ + FileScanTask( + data_entry.data_file, + delete_files=_match_deletes_to_datafile( + data_entry, + positional_delete_entries, + ), + ) + for data_entry in data_entries + ] + + def to_arrow(self) -> pa.Table: + from pyiceberg.io.pyarrow import project_table + + return project_table( + self.plan_files(), + self.table, + self.row_filter, + self.projection(), + case_sensitive=self.case_sensitive, + limit=self.limit, + ) + + def to_pandas(self, **kwargs: Any) -> pd.DataFrame: + return self.to_arrow().to_pandas(**kwargs) + + def to_duckdb(self, table_name: str, connection: Optional[DuckDBPyConnection] = None) -> DuckDBPyConnection: + import duckdb + + con = connection or duckdb.connect(database=":memory:") + con.register(table_name, self.to_arrow()) + + return con + + def to_ray(self) -> ray.data.dataset.Dataset: + import ray + + return ray.data.from_arrow(self.to_arrow()) + + +class MoveOperation(Enum): + First = 1 + Before = 2 + After = 3 + + +@dataclass +class Move: + field_id: int + full_name: str + op: MoveOperation + other_field_id: Optional[int] = None + + +class UpdateSchema: + _table: Table + _schema: Schema + _last_column_id: itertools.count[int] + _identifier_field_names: Set[str] + + _adds: Dict[int, List[NestedField]] = {} + _updates: Dict[int, NestedField] = {} + _deletes: Set[int] = set() + _moves: Dict[int, List[Move]] = {} + + _added_name_to_id: Dict[str, int] = {} + # Part of https://github.com/apache/iceberg/pull/8393 + _id_to_parent: Dict[int, str] = {} 
+    _allow_incompatible_changes: bool
+    _case_sensitive: bool
+    _transaction: Optional[Transaction]
+
+    def __init__(
+        self,
+        table: Table,
+        transaction: Optional[Transaction] = None,
+        allow_incompatible_changes: bool = False,
+        case_sensitive: bool = True,
+    ) -> None:
+        self._table = table
+        self._schema = table.schema()
+        self._last_column_id = itertools.count(table.metadata.last_column_id + 1)
+        self._identifier_field_names = self._schema.identifier_field_names()
+
+        self._adds = {}
+        self._updates = {}
+        self._deletes = set()
+        self._moves = {}
+
+        self._added_name_to_id = {}
+
+        def get_column_name(field_id: int) -> str:
+            column_name = self._schema.find_column_name(column_id=field_id)
+            if column_name is None:
+                raise ValueError(f"Could not find field-id: {field_id}")
+            return column_name
+
+        self._id_to_parent = {
+            field_id: get_column_name(parent_field_id) for field_id, parent_field_id in self._schema._lazy_id_to_parent.items()
+        }
+
+        self._allow_incompatible_changes = allow_incompatible_changes
+        self._case_sensitive = case_sensitive
+        self._transaction = transaction
+
+    def __exit__(self, _: Any, value: Any, traceback: Any) -> None:
+        """Close and commit the change."""
+        return self.commit()
+
+    def __enter__(self) -> UpdateSchema:
+        """Update the table."""
+        return self
+
+    def case_sensitive(self, case_sensitive: bool) -> UpdateSchema:
+        """Determine if the case of the schema needs to be considered when comparing column names.
+
+        Args:
+            case_sensitive: When false, case is not considered in column name comparisons.
+
+        Returns:
+            This for method chaining.
+        """
+        self._case_sensitive = case_sensitive
+        return self
+
+    def add_column(
+        self, path: Union[str, Tuple[str, ...]], field_type: IcebergType, doc: Optional[str] = None, required: bool = False
+    ) -> UpdateSchema:
+        """Add a new top-level column or a new column to a nested struct.
+
+        Because "." may be interpreted as a column path separator or may be used in field names, it
+        is not allowed to add a nested column by passing in a string. To add to nested structures or
+        to add fields with names that contain "." use a tuple instead to indicate the path.
+
+        If type is a nested type, its field IDs are reassigned when added to the existing schema.
+
+        Args:
+            path: Name for the new column.
+            field_type: Type for the new column.
+            doc: Documentation string for the new column.
+            required: Whether the new column is required.
+
+        Returns:
+            This for method chaining.
+        """
+        if isinstance(path, str):
+            if "."
in path: + raise ValueError(f"Cannot add column with ambiguous name: {path}, provide a tuple instead") + path = (path,) + + if required and not self._allow_incompatible_changes: + # Table format version 1 and 2 cannot add required column because there is no initial value + raise ValueError(f'Incompatible change: cannot add required column: {".".join(path)}') + + name = path[-1] + parent = path[:-1] + + full_name = ".".join(path) + parent_full_path = ".".join(parent) + parent_id: int = TABLE_ROOT_ID + + if len(parent) > 0: + parent_field = self._schema.find_field(parent_full_path, self._case_sensitive) + parent_type = parent_field.field_type + if isinstance(parent_type, MapType): + parent_field = parent_type.value_field + elif isinstance(parent_type, ListType): + parent_field = parent_type.element_field + + if not parent_field.field_type.is_struct: + raise ValueError(f"Cannot add column '{name}' to non-struct type: {parent_full_path}") + + parent_id = parent_field.field_id + + existing_field = None + try: + existing_field = self._schema.find_field(full_name, self._case_sensitive) + except ValueError: + pass + + if existing_field is not None and existing_field.field_id not in self._deletes: + raise ValueError(f"Cannot add column, name already exists: {full_name}") + + # assign new IDs in order + new_id = self.assign_new_column_id() + + # update tracking for moves + self._added_name_to_id[full_name] = new_id + self._id_to_parent[new_id] = parent_full_path + + new_type = assign_fresh_schema_ids(field_type, self.assign_new_column_id) + field = NestedField(field_id=new_id, name=name, field_type=new_type, required=required, doc=doc) + + if parent_id in self._adds: + self._adds[parent_id].append(field) + else: + self._adds[parent_id] = [field] + + return self + + def delete_column(self, path: Union[str, Tuple[str, ...]]) -> UpdateSchema: + """Delete a column from a table. + + Args: + path: The path to the column. + + Returns: + The UpdateSchema with the delete operation staged. + """ + name = (path,) if isinstance(path, str) else path + full_name = ".".join(name) + + field = self._schema.find_field(full_name, case_sensitive=self._case_sensitive) + + if field.field_id in self._adds: + raise ValueError(f"Cannot delete a column that has additions: {full_name}") + if field.field_id in self._updates: + raise ValueError(f"Cannot delete a column that has updates: {full_name}") + + self._deletes.add(field.field_id) + + return self + + def rename_column(self, path_from: Union[str, Tuple[str, ...]], new_name: str) -> UpdateSchema: + """Update the name of a column. + + Args: + path_from: The path to the column to be renamed. + new_name: The new path of the column. + + Returns: + The UpdateSchema with the rename operation staged. 
+ """ + path_from = ".".join(path_from) if isinstance(path_from, tuple) else path_from + field_from = self._schema.find_field(path_from, self._case_sensitive) + + if field_from.field_id in self._deletes: + raise ValueError(f"Cannot rename a column that will be deleted: {path_from}") + + if updated := self._updates.get(field_from.field_id): + self._updates[field_from.field_id] = NestedField( + field_id=updated.field_id, + name=new_name, + field_type=updated.field_type, + doc=updated.doc, + required=updated.required, + ) + else: + self._updates[field_from.field_id] = NestedField( + field_id=field_from.field_id, + name=new_name, + field_type=field_from.field_type, + doc=field_from.doc, + required=field_from.required, + ) + + # Lookup the field because of casing + from_field_correct_casing = self._schema.find_column_name(field_from.field_id) + if from_field_correct_casing in self._identifier_field_names: + self._identifier_field_names.remove(from_field_correct_casing) + new_identifier_path = f"{from_field_correct_casing[:-len(field_from.name)]}{new_name}" + self._identifier_field_names.add(new_identifier_path) + + return self + + def make_column_optional(self, path: Union[str, Tuple[str, ...]]) -> UpdateSchema: + """Make a column optional. + + Args: + path: The path to the field. + + Returns: + The UpdateSchema with the requirement change staged. + """ + self._set_column_requirement(path, required=False) + return self + + def set_identifier_fields(self, *fields: str) -> None: + self._identifier_field_names = set(fields) + + def _set_column_requirement(self, path: Union[str, Tuple[str, ...]], required: bool) -> None: + path = (path,) if isinstance(path, str) else path + name = ".".join(path) + + field = self._schema.find_field(name, self._case_sensitive) + + if (field.required and required) or (field.optional and not required): + # if the change is a noop, allow it even if allowIncompatibleChanges is false + return + + if not self._allow_incompatible_changes and required: + raise ValueError(f"Cannot change column nullability: {name}: optional -> required") + + if field.field_id in self._deletes: + raise ValueError(f"Cannot update a column that will be deleted: {name}") + + if updated := self._updates.get(field.field_id): + self._updates[field.field_id] = NestedField( + field_id=updated.field_id, + name=updated.name, + field_type=updated.field_type, + doc=updated.doc, + required=required, + ) + else: + self._updates[field.field_id] = NestedField( + field_id=field.field_id, + name=field.name, + field_type=field.field_type, + doc=field.doc, + required=required, + ) + + def update_column( + self, + path: Union[str, Tuple[str, ...]], + field_type: Optional[IcebergType] = None, + required: Optional[bool] = None, + doc: Optional[str] = None, + ) -> UpdateSchema: + """Update the type of column. + + Args: + path: The path to the field. + field_type: The new type + required: If the field should be required + doc: Documentation describing the column + + Returns: + The UpdateSchema with the type update staged. 
+ """ + path = (path,) if isinstance(path, str) else path + full_name = ".".join(path) + + if field_type is None and required is None and doc is None: + return self + + field = self._schema.find_field(full_name, self._case_sensitive) + + if field.field_id in self._deletes: + raise ValueError(f"Cannot update a column that will be deleted: {full_name}") + + if field_type is not None: + if not field.field_type.is_primitive: + raise ValidationError(f"Cannot change column type: {field.field_type} is not a primitive") + + if not self._allow_incompatible_changes and field.field_type != field_type: + try: + promote(field.field_type, field_type) + except ResolveError as e: + raise ValidationError(f"Cannot change column type: {full_name}: {field.field_type} -> {field_type}") from e + + if updated := self._updates.get(field.field_id): + self._updates[field.field_id] = NestedField( + field_id=updated.field_id, + name=updated.name, + field_type=field_type or updated.field_type, + doc=doc or updated.doc, + required=updated.required, + ) + else: + self._updates[field.field_id] = NestedField( + field_id=field.field_id, + name=field.name, + field_type=field_type or field.field_type, + doc=doc or field.doc, + required=field.required, + ) + + if required is not None: + self._set_column_requirement(path, required=required) + + return self + + def _find_for_move(self, name: str) -> Optional[int]: + try: + return self._schema.find_field(name, self._case_sensitive).field_id + except ValueError: + pass + + return self._added_name_to_id.get(name) + + def _move(self, move: Move) -> None: + if parent_name := self._id_to_parent.get(move.field_id): + parent_field = self._schema.find_field(parent_name, case_sensitive=self._case_sensitive) + if not parent_field.field_type.is_struct: + raise ValueError(f"Cannot move fields in non-struct type: {parent_field.field_type}") + + if move.op == MoveOperation.After or move.op == MoveOperation.Before: + if move.other_field_id is None: + raise ValueError("Expected other field when performing before/after move") + + if self._id_to_parent.get(move.field_id) != self._id_to_parent.get(move.other_field_id): + raise ValueError(f"Cannot move field {move.full_name} to a different struct") + + self._moves[parent_field.field_id] = self._moves.get(parent_field.field_id, []) + [move] + else: + # In the top level field + if move.op == MoveOperation.After or move.op == MoveOperation.Before: + if move.other_field_id is None: + raise ValueError("Expected other field when performing before/after move") + + if other_struct := self._id_to_parent.get(move.other_field_id): + raise ValueError(f"Cannot move field {move.full_name} to a different struct: {other_struct}") + + self._moves[TABLE_ROOT_ID] = self._moves.get(TABLE_ROOT_ID, []) + [move] + + def move_first(self, path: Union[str, Tuple[str, ...]]) -> UpdateSchema: + """Move the field to the first position of the parent struct. + + Args: + path: The path to the field. + + Returns: + The UpdateSchema with the move operation staged. + """ + full_name = ".".join(path) if isinstance(path, tuple) else path + + field_id = self._find_for_move(full_name) + + if field_id is None: + raise ValueError(f"Cannot move missing column: {full_name}") + + self._move(Move(field_id=field_id, full_name=full_name, op=MoveOperation.First)) + + return self + + def move_before(self, path: Union[str, Tuple[str, ...]], before_path: Union[str, Tuple[str, ...]]) -> UpdateSchema: + """Move the field to before another field. + + Args: + path: The path to the field. 
+            before_path: The path to the column that this column should be moved before.
+
+        Returns:
+            The UpdateSchema with the move operation staged.
+        """
+        full_name = ".".join(path) if isinstance(path, tuple) else path
+        field_id = self._find_for_move(full_name)
+
+        if field_id is None:
+            raise ValueError(f"Cannot move missing column: {full_name}")
+
+        before_full_name = ".".join(before_path) if isinstance(before_path, tuple) else before_path
+        before_field_id = self._find_for_move(before_full_name)
+
+        if before_field_id is None:
+            raise ValueError(f"Cannot move {full_name} before missing column: {before_full_name}")
+
+        if field_id == before_field_id:
+            raise ValueError(f"Cannot move {full_name} before itself")
+
+        self._move(Move(field_id=field_id, full_name=full_name, other_field_id=before_field_id, op=MoveOperation.Before))
+
+        return self
+
+    def move_after(self, path: Union[str, Tuple[str, ...]], after_name: Union[str, Tuple[str, ...]]) -> UpdateSchema:
+        """Move the field to after another field.
+
+        Args:
+            path: The path to the field.
+            after_name: The path to the column that this column should be moved after.
+
+        Returns:
+            The UpdateSchema with the move operation staged.
+        """
+        full_name = ".".join(path) if isinstance(path, tuple) else path
+
+        field_id = self._find_for_move(full_name)
+
+        if field_id is None:
+            raise ValueError(f"Cannot move missing column: {full_name}")
+
+        after_path = ".".join(after_name) if isinstance(after_name, tuple) else after_name
+        after_field_id = self._find_for_move(after_path)
+
+        if after_field_id is None:
+            raise ValueError(f"Cannot move {full_name} after missing column: {after_path}")
+
+        if field_id == after_field_id:
+            raise ValueError(f"Cannot move {full_name} after itself")
+
+        self._move(Move(field_id=field_id, full_name=full_name, other_field_id=after_field_id, op=MoveOperation.After))
+
+        return self
+
+    def commit(self) -> None:
+        """Apply the pending changes and commit."""
+        new_schema = self._apply()
+
+        if new_schema != self._schema:
+            last_column_id = max(self._table.metadata.last_column_id, new_schema.highest_field_id)
+            updates = (
+                AddSchemaUpdate(schema=new_schema, last_column_id=last_column_id),
+                SetCurrentSchemaUpdate(schema_id=-1),
+            )
+            requirements = (AssertCurrentSchemaId(current_schema_id=self._schema.schema_id),)
+
+            if self._transaction is not None:
+                self._transaction._append_updates(*updates)  # pylint: disable=W0212
+                self._transaction._append_requirements(*requirements)  # pylint: disable=W0212
+            else:
+                self._table._do_commit(updates=updates, requirements=requirements)  # pylint: disable=W0212
+
+    def _apply(self) -> Schema:
+        """Apply the pending changes to the original schema and return the result.
+
+        Returns:
+            The result Schema when all pending updates are applied.
+        """
+        struct = visit(self._schema, _ApplyChanges(self._adds, self._updates, self._deletes, self._moves))
+        if struct is None:
+            # Should never happen
+            raise ValueError("Could not apply changes")
+
+        # Check the field-ids
+        new_schema = Schema(*struct.fields)
+        field_ids = set()
+        for name in self._identifier_field_names:
+            try:
+                field = new_schema.find_field(name, case_sensitive=self._case_sensitive)
+            except ValueError as e:
+                raise ValueError(
+                    f"Cannot find identifier field {name}. In case of deletion, update the identifier fields first."
+                ) from e
+
+            field_ids.add(field.field_id)
+
+        return Schema(*struct.fields, schema_id=1 + max(self._table.schemas().keys()), identifier_field_ids=field_ids)
+
+    def assign_new_column_id(self) -> int:
+        return next(self._last_column_id)
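
A sketch of how the evolution API above is typically driven (the table and
column names are hypothetical); leaving the context manager calls commit():

    from pyiceberg.types import LongType

    with table.update_schema() as update:
        update.add_column("retries", LongType(), doc="Number of retries")
        update.rename_column("email", "email_address")
        update.move_first("retries")  # added columns can be moved in the same change
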
+
+
+class _ApplyChanges(SchemaVisitor[Optional[IcebergType]]):
+    _adds: Dict[int, List[NestedField]]
+    _updates: Dict[int, NestedField]
+    _deletes: Set[int]
+    _moves: Dict[int, List[Move]]
+
+    def __init__(
+        self, adds: Dict[int, List[NestedField]], updates: Dict[int, NestedField], deletes: Set[int], moves: Dict[int, List[Move]]
+    ) -> None:
+        self._adds = adds
+        self._updates = updates
+        self._deletes = deletes
+        self._moves = moves
+
+    def schema(self, schema: Schema, struct_result: Optional[IcebergType]) -> Optional[IcebergType]:
+        added = self._adds.get(TABLE_ROOT_ID)
+        moves = self._moves.get(TABLE_ROOT_ID)
+
+        if added is not None or moves is not None:
+            if not isinstance(struct_result, StructType):
+                raise ValueError(f"Cannot add fields to non-struct: {struct_result}")
+
+            if new_fields := _add_and_move_fields(struct_result.fields, added or [], moves or []):
+                return StructType(*new_fields)
+
+        return struct_result
+
+    def struct(self, struct: StructType, field_results: List[Optional[IcebergType]]) -> Optional[IcebergType]:
+        has_changes = False
+        new_fields = []
+
+        for idx, result_type in enumerate(field_results):
+            # Has been deleted
+            if result_type is None:
+                has_changes = True
+                continue
+
+            field = struct.fields[idx]
+
+            name = field.name
+            doc = field.doc
+            required = field.required
+
+            # There is an update
+            if update := self._updates.get(field.field_id):
+                name = update.name
+                doc = update.doc
+                required = update.required
+
+            if field.name == name and field.field_type == result_type and field.required == required and field.doc == doc:
+                new_fields.append(field)
+            else:
+                has_changes = True
+                new_fields.append(
+                    NestedField(field_id=field.field_id, name=name, field_type=result_type, required=required, doc=doc)
+                )
+
+        if has_changes:
+            return StructType(*new_fields)
+
+        return struct
+
+    def field(self, field: NestedField, field_result: Optional[IcebergType]) -> Optional[IcebergType]:
+        # the API validates that deletes, updates, and additions don't conflict
+        # handle deletes
+        if field.field_id in self._deletes:
+            return None
+
+        # handle updates
+        if (update := self._updates.get(field.field_id)) and field.field_type != update.field_type:
+            return update.field_type
+
+        if isinstance(field_result, StructType):
+            # handle add & moves
+            added = self._adds.get(field.field_id)
+            moves = self._moves.get(field.field_id)
+            if added is not None or moves is not None:
+                if not isinstance(field.field_type, StructType):
+                    raise ValueError(f"Cannot add fields to non-struct: {field}")
+
+                if new_fields := _add_and_move_fields(field_result.fields, added or [], moves or []):
+                    return StructType(*new_fields)
+
+        return field_result
+
+    def list(self, list_type: ListType, element_result: Optional[IcebergType]) -> Optional[IcebergType]:
+        element_type = self.field(list_type.element_field, element_result)
+        if element_type is None:
+            raise ValueError(f"Cannot delete element type from list: {element_result}")
+
+        return ListType(element_id=list_type.element_id, element=element_type, element_required=list_type.element_required)
+
+    def map(
+        self, map_type: MapType, key_result: Optional[IcebergType], value_result: Optional[IcebergType]
+    ) -> Optional[IcebergType]:
+        key_id: int = map_type.key_field.field_id
+
+        if key_id in self._deletes:
+            raise ValueError(f"Cannot delete map keys: {map_type}")
+
+        if key_id in self._updates:
+            raise ValueError(f"Cannot update map keys: {map_type}")
+
+        if key_id in self._adds:
+            raise ValueError(f"Cannot add fields to map keys: {map_type}")
+
+        if map_type.key_type != key_result:
+            raise ValueError(f"Cannot alter map keys: {map_type}")
+
+        value_field: NestedField = map_type.value_field
+        value_type = self.field(value_field, value_result)
+        if value_type is None:
+            raise ValueError(f"Cannot delete value type from map: {value_field}")
+
+        return MapType(
+            key_id=map_type.key_id,
+            key_type=map_type.key_type,
+            value_id=map_type.value_id,
+            value_type=value_type,
+            value_required=map_type.value_required,
+        )
+
+    def primitive(self, primitive: PrimitiveType) -> Optional[IcebergType]:
+        return primitive
+
+
+def _add_fields(fields: Tuple[NestedField, ...], adds: Optional[List[NestedField]]) -> Tuple[NestedField, ...]:
+    adds = adds or []
+    return fields + tuple(adds)
+
+
+def _move_fields(fields: Tuple[NestedField, ...], moves: List[Move]) -> Tuple[NestedField, ...]:
+    reordered = list(copy(fields))
+    for move in moves:
+        # Find the field that we're about to move
+        field = next(field for field in reordered if field.field_id == move.field_id)
+        # Remove the field that we're about to move from the list
+        reordered = [field for field in reordered if field.field_id != move.field_id]
+
+        if move.op == MoveOperation.First:
+            reordered = [field] + reordered
+        elif move.op == MoveOperation.Before or move.op == MoveOperation.After:
+            other_field_id = move.other_field_id
+            other_field_pos = next(i for i, field in enumerate(reordered) if field.field_id == other_field_id)
+            if move.op == MoveOperation.Before:
+                reordered.insert(other_field_pos, field)
+            else:
+                reordered.insert(other_field_pos + 1, field)
+        else:
+            raise ValueError(f"Unknown operation: {move.op}")
+
+    return tuple(reordered)
+
+
+def _add_and_move_fields(
+    fields: Tuple[NestedField, ...], adds: List[NestedField], moves: List[Move]
+) -> Optional[Tuple[NestedField, ...]]:
+    if len(adds) > 0:
+        # always apply adds first so that added fields can be moved
+        added = _add_fields(fields, adds)
+        if len(moves) > 0:
+            return _move_fields(added, moves)
+        else:
+            return added
+    elif len(moves) > 0:
+        return _move_fields(fields, moves)
+    # Neither adds nor moves: nothing to change
+    return None
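
The add-and-move helpers above are pure functions over tuples of fields, so their
semantics are easy to check in isolation. A small worked sketch with hypothetical
fields (adds are always applied first, so that added fields can be moved):

    from pyiceberg.table import Move, MoveOperation, _add_and_move_fields
    from pyiceberg.types import LongType, NestedField

    a = NestedField(field_id=1, name="a", field_type=LongType(), required=False)
    b = NestedField(field_id=2, name="b", field_type=LongType(), required=False)
    d = NestedField(field_id=4, name="d", field_type=LongType(), required=False)

    move = Move(field_id=4, full_name="d", op=MoveOperation.Before, other_field_id=2)

    # _add_fields yields (a, b, d); the move then reorders it to (a, d, b).
    assert _add_and_move_fields((a, b), [d], [move]) == (a, d, b)
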
diff --git a/pyiceberg/table/metadata.py b/pyiceberg/table/metadata.py
new file mode 100644
index 0000000000..73d76d8606
--- /dev/null
+++ b/pyiceberg/table/metadata.py
@@ -0,0 +1,447 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from __future__ import annotations + +import datetime +import uuid +from copy import copy +from typing import ( + Any, + Dict, + List, + Literal, + Optional, + Union, +) + +from pydantic import Field, model_validator +from pydantic import ValidationError as PydanticValidationError +from typing_extensions import Annotated + +from pyiceberg.exceptions import ValidationError +from pyiceberg.partitioning import PARTITION_FIELD_ID_START, PartitionSpec, assign_fresh_partition_spec_ids +from pyiceberg.schema import Schema, assign_fresh_schema_ids +from pyiceberg.table.refs import MAIN_BRANCH, SnapshotRef, SnapshotRefType +from pyiceberg.table.snapshots import MetadataLogEntry, Snapshot, SnapshotLogEntry +from pyiceberg.table.sorting import ( + UNSORTED_SORT_ORDER, + UNSORTED_SORT_ORDER_ID, + SortOrder, + assign_fresh_sort_order_ids, +) +from pyiceberg.typedef import ( + EMPTY_DICT, + IcebergBaseModel, + IcebergRootModel, + Properties, +) +from pyiceberg.utils.datetime import datetime_to_millis + +CURRENT_SNAPSHOT_ID = "current-snapshot-id" +CURRENT_SCHEMA_ID = "current-schema-id" +SCHEMAS = "schemas" +DEFAULT_SPEC_ID = "default-spec-id" +PARTITION_SPEC = "partition-spec" +PARTITION_SPECS = "partition-specs" +SORT_ORDERS = "sort-orders" +LAST_PARTITION_ID = "last-partition-id" +LAST_ASSIGNED_FIELD_ID = "last-assigned-field-id" +REFS = "refs" +SPEC_ID = "spec-id" +FIELD_ID = "field-id" +FIELDS = "fields" + +INITIAL_SEQUENCE_NUMBER = 0 +INITIAL_SPEC_ID = 0 +DEFAULT_SCHEMA_ID = 0 + + +def cleanup_snapshot_id(data: Dict[str, Any]) -> Dict[str, Any]: + """Run before validation.""" + if CURRENT_SNAPSHOT_ID in data and data[CURRENT_SNAPSHOT_ID] == -1: + # We treat -1 and None the same, by cleaning this up + # in a pre-validator, we can simplify the logic later on + data[CURRENT_SNAPSHOT_ID] = None + return data + + +def check_schemas(table_metadata: TableMetadata) -> TableMetadata: + """Check if the current-schema-id is actually present in schemas.""" + current_schema_id = table_metadata.current_schema_id + + for schema in table_metadata.schemas: + if schema.schema_id == current_schema_id: + return table_metadata + + raise ValidationError(f"current-schema-id {current_schema_id} can't be found in the schemas") + + +def check_partition_specs(table_metadata: TableMetadata) -> TableMetadata: + """Check if the default-spec-id is present in partition-specs.""" + default_spec_id = table_metadata.default_spec_id + + partition_specs: List[PartitionSpec] = table_metadata.partition_specs + for spec in partition_specs: + if spec.spec_id == default_spec_id: + return table_metadata + + raise ValidationError(f"default-spec-id {default_spec_id} can't be found") + + +def check_sort_orders(table_metadata: TableMetadata) -> TableMetadata: + """Check if the default_sort_order_id is present in sort-orders.""" + default_sort_order_id: int = table_metadata.default_sort_order_id + + if default_sort_order_id != UNSORTED_SORT_ORDER_ID: + sort_orders: List[SortOrder] = table_metadata.sort_orders + for sort_order in sort_orders: + if sort_order.order_id == default_sort_order_id: + return table_metadata + + raise ValidationError(f"default-sort-order-id {default_sort_order_id} can't be found in {sort_orders}") + return table_metadata + + +def construct_refs(table_metadata: TableMetadata) -> TableMetadata: + """Set the main branch if missing.""" + if table_metadata.current_snapshot_id is not None: + if MAIN_BRANCH not in table_metadata.refs: + table_metadata.refs[MAIN_BRANCH] = SnapshotRef( + 
snapshot_id=table_metadata.current_snapshot_id, snapshot_ref_type=SnapshotRefType.BRANCH + ) + return table_metadata + + +class TableMetadataCommonFields(IcebergBaseModel): + """Metadata for an Iceberg table as specified in the Apache Iceberg spec. + + https://iceberg.apache.org/spec/#iceberg-table-spec + """ + + location: str = Field() + """The table’s base location. This is used by writers to determine where + to store data files, manifest files, and table metadata files.""" + + table_uuid: uuid.UUID = Field(alias="table-uuid", default_factory=uuid.uuid4) + """A UUID that identifies the table, generated when the table is created. + Implementations must throw an exception if a table’s UUID does not match + the expected UUID after refreshing metadata.""" + + last_updated_ms: int = Field( + alias="last-updated-ms", default_factory=lambda: datetime_to_millis(datetime.datetime.now().astimezone()) + ) + """Timestamp in milliseconds from the unix epoch when the table + was last updated. Each table metadata file should update this + field just before writing.""" + + last_column_id: int = Field(alias="last-column-id") + """An integer; the highest assigned column ID for the table. + This is used to ensure fields are always assigned an unused ID + when evolving schemas.""" + + schemas: List[Schema] = Field(default_factory=list) + """A list of schemas, stored as objects with schema-id.""" + + current_schema_id: int = Field(alias="current-schema-id", default=DEFAULT_SCHEMA_ID) + """ID of the table’s current schema.""" + + partition_specs: List[PartitionSpec] = Field(alias="partition-specs", default_factory=list) + """A list of partition specs, stored as full partition spec objects.""" + + default_spec_id: int = Field(alias="default-spec-id", default=INITIAL_SPEC_ID) + """ID of the “current” spec that writers should use by default.""" + + last_partition_id: Optional[int] = Field(alias="last-partition-id", default=None) + """An integer; the highest assigned partition field ID across all + partition specs for the table. This is used to ensure partition fields + are always assigned an unused ID when evolving specs.""" + + properties: Dict[str, str] = Field(default_factory=dict) + """A string to string map of table properties. This is used to + control settings that affect reading and writing and is not intended + to be used for arbitrary metadata. For example, commit.retry.num-retries + is used to control the number of commit retries.""" + + current_snapshot_id: Optional[int] = Field(alias="current-snapshot-id", default=None) + """ID of the current table snapshot.""" + + snapshots: List[Snapshot] = Field(default_factory=list) + """A list of valid snapshots. Valid snapshots are snapshots for which + all data files exist in the file system. A data file must not be + deleted from the file system until the last snapshot in which it was + listed is garbage collected.""" + + snapshot_log: List[SnapshotLogEntry] = Field(alias="snapshot-log", default_factory=list) + """A list (optional) of timestamp and snapshot ID pairs that encodes + changes to the current snapshot for the table. Each time the + current-snapshot-id is changed, a new entry should be added with the + last-updated-ms and the new current-snapshot-id. 
When snapshots are
+    expired from the list of valid snapshots, all entries before a snapshot
+    that has expired should be removed."""
+
+    metadata_log: List[MetadataLogEntry] = Field(alias="metadata-log", default_factory=list)
+    """A list (optional) of timestamp and metadata file location pairs that
+    encodes changes to the previous metadata files for the table. Each time
+    a new metadata file is created, a new entry of the previous metadata
+    file location should be added to the list. Tables can be configured to
+    remove the oldest metadata log entries and keep a fixed-size log of the most
+    recent entries after a commit."""
+
+    sort_orders: List[SortOrder] = Field(alias="sort-orders", default_factory=list)
+    """A list of sort orders, stored as full sort order objects."""
+
+    default_sort_order_id: int = Field(alias="default-sort-order-id", default=UNSORTED_SORT_ORDER_ID)
+    """Default sort order id of the table. Note that this could be used by
+    writers, but is not used when reading because reads use the specs stored
+    in manifest files."""
+
+    refs: Dict[str, SnapshotRef] = Field(default_factory=dict)
+    """A map of snapshot references.
+    The map keys are the unique snapshot reference names in the table,
+    and the map values are snapshot reference objects.
+    There is always a main branch reference pointing to the
+    current-snapshot-id even if the refs map is null."""
+
+
+class TableMetadataV1(TableMetadataCommonFields, IcebergBaseModel):
+    """Represents version 1 of the Table Metadata.
+
+    More information about the specification:
+    https://iceberg.apache.org/spec/#version-1-analytic-data-tables
+    """
+
+    # When we read a V1 format-version, we'll make sure to populate the fields
+    # for V2 as well. This makes it easier downstream because we can just
+    # assume that everything is a TableMetadataV2.
+    # When writing, we should stick to the same version that it was,
+    # because bumping the version should be an explicit operation that is up
+    # to the owner of the table.
+
+    @model_validator(mode="before")
+    def cleanup_snapshot_id(cls, data: Dict[str, Any]) -> Dict[str, Any]:
+        return cleanup_snapshot_id(data)
+
+    @model_validator(mode="after")
+    def construct_refs(cls, data: TableMetadataV1) -> TableMetadataV1:
+        return construct_refs(data)
+
+    @model_validator(mode="before")
+    def set_v2_compatible_defaults(cls, data: Dict[str, Any]) -> Dict[str, Any]:
+        """Set default values to be compatible with the format v2.
+
+        Args:
+            data: The raw arguments when initializing a V1 TableMetadata.
+
+        Returns:
+            The TableMetadata with the defaults applied.
+        """
+        # When the schema doesn't have an ID
+        if data.get("schema") and "schema_id" not in data["schema"]:
+            data["schema"]["schema_id"] = DEFAULT_SCHEMA_ID
+
+        return data
+
+    @model_validator(mode="before")
+    def construct_schemas(cls, data: Dict[str, Any]) -> Dict[str, Any]:
+        """Convert the schema into schemas.
+
+        For V1 schemas is optional, and if they aren't set, we'll set them
+        in this validator. This way we can always use the schemas when reading
+        table metadata, and we don't have to worry if it is a v1 or v2 format.
+
+        Args:
+            data: The raw data after validation, meaning that the aliases are applied.
+
+        Returns:
+            The TableMetadata with the schemas set, if not provided.
+        """
+        if not data.get("schemas"):
+            schema = data["schema"]
+            data["schemas"] = [schema]
+        return data
+
+    @model_validator(mode="before")
+    def construct_partition_specs(cls, data: Dict[str, Any]) -> Dict[str, Any]:
+        """Convert the partition_spec into partition_specs.
+
+        For V1 partition_specs is optional, and if they aren't set, we'll set them
+        in this validator. This way we can always use the partition_specs when reading
+        table metadata, and we don't have to worry if it is a v1 or v2 format.
+
+        Args:
+            data: The raw data after validation, meaning that the aliases are applied.
+
+        Returns:
+            The TableMetadata with the partition_specs set, if not provided.
+        """
+        if not data.get(PARTITION_SPECS):
+            if data.get(PARTITION_SPEC) is not None:
+                # Promote the spec from partition-spec to partition-specs
+                fields = data[PARTITION_SPEC]
+                data[PARTITION_SPECS] = [{SPEC_ID: INITIAL_SPEC_ID, FIELDS: fields}]
+                data[DEFAULT_SPEC_ID] = INITIAL_SPEC_ID
+            else:
+                data[PARTITION_SPECS] = [{"field-id": 0, "fields": ()}]
+
+        data[LAST_PARTITION_ID] = max(
+            [field.get(FIELD_ID) for spec in data[PARTITION_SPECS] for field in spec[FIELDS]], default=PARTITION_FIELD_ID_START
+        )
+
+        return data
+
+    @model_validator(mode="before")
+    def set_sort_orders(cls, data: Dict[str, Any]) -> Dict[str, Any]:
+        """Set the sort_orders if not provided.
+
+        For V1 sort_orders is optional, and if they aren't set, we'll set them
+        in this validator.
+
+        Args:
+            data: The raw data after validation, meaning that the aliases are applied.
+
+        Returns:
+            The TableMetadata with the sort_orders set, if not provided.
+        """
+        if not data.get(SORT_ORDERS):
+            data[SORT_ORDERS] = [UNSORTED_SORT_ORDER]
+        return data
+
+    def to_v2(self) -> TableMetadataV2:
+        metadata = copy(self.model_dump())
+        metadata["format-version"] = 2
+        return TableMetadataV2.model_validate(metadata)
+
+    format_version: Literal[1] = Field(alias="format-version")
+    """An integer version number for the format. Currently, this can be 1 or 2
+    based on the spec. Implementations must throw an exception if a table’s
+    version is higher than the supported version."""
+
+    schema_: Schema = Field(alias="schema")
+    """The table’s current schema. (Deprecated: use schemas and
+    current-schema-id instead)."""
+
+    partition_spec: List[Dict[str, Any]] = Field(alias="partition-spec")
+    """The table’s current partition spec, stored as only fields.
+    Note that this is used by writers to partition data, but is
+    not used when reading because reads use the specs stored in
+    manifest files. (Deprecated: use partition-specs and default-spec-id
+    instead)."""
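
A sketch of the V1 compatibility shims above in action; the raw dict below is a
heavily trimmed, hypothetical example (a real metadata file carries more fields):

    raw = {
        "format-version": 1,
        "location": "s3://warehouse/db/tbl",
        "last-column-id": 1,
        "schema": {"type": "struct", "fields": [{"id": 1, "name": "id", "type": "long", "required": True}]},
        "partition-spec": [],
    }
    v1 = TableMetadataV1(**raw)
    assert v1.schemas and v1.partition_specs and v1.sort_orders  # filled in by the validators
    v2 = v1.to_v2()  # promoting to format-version 2 is an explicit step
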
+
+
+class TableMetadataV2(TableMetadataCommonFields, IcebergBaseModel):
+    """Represents version 2 of the Table Metadata.
+
+    This extends Version 1 with row-level deletes, and adds some additional
+    information to the schema, such as all the historical schemas, partition-specs,
+    sort-orders.
+ + For more information: + https://iceberg.apache.org/spec/#version-2-row-level-deletes + """ + + @model_validator(mode="before") + def cleanup_snapshot_id(cls, data: Dict[str, Any]) -> Dict[str, Any]: + return cleanup_snapshot_id(data) + + @model_validator(mode="after") + def check_schemas(cls, table_metadata: TableMetadata) -> TableMetadata: + return check_schemas(table_metadata) + + @model_validator(mode="after") + def check_partition_specs(cls, table_metadata: TableMetadata) -> TableMetadata: + return check_partition_specs(table_metadata) + + @model_validator(mode="after") + def check_sort_orders(cls, table_metadata: TableMetadata) -> TableMetadata: + return check_sort_orders(table_metadata) + + @model_validator(mode="after") + def construct_refs(cls, table_metadata: TableMetadata) -> TableMetadata: + return construct_refs(table_metadata) + + format_version: Literal[2] = Field(alias="format-version", default=2) + """An integer version number for the format. Currently, this can be 1 or 2 + based on the spec. Implementations must throw an exception if a table’s + version is higher than the supported version.""" + + last_sequence_number: int = Field(alias="last-sequence-number", default=INITIAL_SEQUENCE_NUMBER) + """The table’s highest assigned sequence number, a monotonically + increasing long that tracks the order of snapshots in a table.""" + + +TableMetadata = Annotated[Union[TableMetadataV1, TableMetadataV2], Field(discriminator="format_version")] + + +def new_table_metadata( + schema: Schema, + partition_spec: PartitionSpec, + sort_order: SortOrder, + location: str, + properties: Properties = EMPTY_DICT, + table_uuid: Optional[uuid.UUID] = None, +) -> TableMetadata: + fresh_schema = assign_fresh_schema_ids(schema) + fresh_partition_spec = assign_fresh_partition_spec_ids(partition_spec, schema, fresh_schema) + fresh_sort_order = assign_fresh_sort_order_ids(sort_order, schema, fresh_schema) + + if table_uuid is None: + table_uuid = uuid.uuid4() + + return TableMetadataV2( + location=location, + schemas=[fresh_schema], + last_column_id=fresh_schema.highest_field_id, + current_schema_id=fresh_schema.schema_id, + partition_specs=[fresh_partition_spec], + default_spec_id=fresh_partition_spec.spec_id, + sort_orders=[fresh_sort_order], + default_sort_order_id=fresh_sort_order.order_id, + properties=properties, + last_partition_id=fresh_partition_spec.last_assigned_field_id, + table_uuid=table_uuid, + ) + + +class TableMetadataWrapper(IcebergRootModel[TableMetadata]): + root: TableMetadata + + +class TableMetadataUtil: + """Helper class for parsing TableMetadata.""" + + @staticmethod + def parse_raw(data: str) -> TableMetadata: + try: + return TableMetadataWrapper.model_validate_json(data).root + except PydanticValidationError as e: + raise ValidationError(e) from e + + @staticmethod + def parse_obj(data: Dict[str, Any]) -> TableMetadata: + if "format-version" not in data: + raise ValidationError(f"Missing format-version in TableMetadata: {data}") + format_version = data["format-version"] + + if format_version == 1: + return TableMetadataV1(**data) + elif format_version == 2: + return TableMetadataV2(**data) + else: + raise ValidationError(f"Unknown format version: {format_version}") + + +TableMetadata = Annotated[Union[TableMetadataV1, TableMetadataV2], Field(discriminator="format_version")] # type: ignore diff --git a/pyiceberg/table/refs.py b/pyiceberg/table/refs.py new file mode 100644 index 0000000000..b9692ca975 --- /dev/null +++ b/pyiceberg/table/refs.py @@ -0,0 +1,41 @@ +# Licensed 
to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from enum import Enum +from typing import Optional + +from pydantic import Field + +from pyiceberg.typedef import IcebergBaseModel + +MAIN_BRANCH = "main" + + +class SnapshotRefType(str, Enum): + BRANCH = "branch" + TAG = "tag" + + def __repr__(self) -> str: + """Return the string representation of the SnapshotRefType class.""" + return f"SnapshotRefType.{self.name}" + + +class SnapshotRef(IcebergBaseModel): + snapshot_id: int = Field(alias="snapshot-id") + snapshot_ref_type: SnapshotRefType = Field(alias="type") + min_snapshots_to_keep: Optional[int] = Field(alias="min-snapshots-to-keep", default=None) + max_snapshot_age_ms: Optional[int] = Field(alias="max-snapshot-age-ms", default=None) + max_ref_age_ms: Optional[int] = Field(alias="max-ref-age-ms", default=None) diff --git a/pyiceberg/table/snapshots.py b/pyiceberg/table/snapshots.py new file mode 100644 index 0000000000..fe828db029 --- /dev/null +++ b/pyiceberg/table/snapshots.py @@ -0,0 +1,118 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from enum import Enum +from typing import ( + Any, + Dict, + List, + Optional, +) + +from pydantic import Field, PrivateAttr, model_serializer + +from pyiceberg.io import FileIO +from pyiceberg.manifest import ManifestFile, read_manifest_list +from pyiceberg.typedef import IcebergBaseModel + +OPERATION = "operation" + + +class Operation(Enum): + """Describes the operation. + + Possible operation values are: + - append: Only data files were added and no files were removed. + - replace: Data and delete files were added and removed without changing table data; i.e., compaction, changing the data file format, or relocating data files. + - overwrite: Data and delete files were added and removed in a logical overwrite operation. + - delete: Data files were removed and their contents logically deleted and/or delete files were added to delete rows. 
+ """ + + APPEND = "append" + REPLACE = "replace" + OVERWRITE = "overwrite" + DELETE = "delete" + + def __repr__(self) -> str: + """Return the string representation of the Operation class.""" + return f"Operation.{self.name}" + + +class Summary(IcebergBaseModel): + """A class that stores the summary information for a Snapshot. + + The snapshot summary’s operation field is used by some operations, + like snapshot expiration, to skip processing certain snapshots. + """ + + operation: Operation = Field() + _additional_properties: Dict[str, str] = PrivateAttr() + + def __init__(self, operation: Operation, **data: Any) -> None: + super().__init__(operation=operation, **data) + self._additional_properties = data + + @model_serializer + def ser_model(self) -> Dict[str, str]: + return { + "operation": str(self.operation.value), + **self._additional_properties, + } + + @property + def additional_properties(self) -> Dict[str, str]: + return self._additional_properties + + def __repr__(self) -> str: + """Return the string representation of the Summary class.""" + repr_properties = f", **{repr(self._additional_properties)}" if self._additional_properties else "" + return f"Summary({repr(self.operation)}{repr_properties})" + + +class Snapshot(IcebergBaseModel): + snapshot_id: int = Field(alias="snapshot-id") + parent_snapshot_id: Optional[int] = Field(alias="parent-snapshot-id", default=None) + sequence_number: Optional[int] = Field(alias="sequence-number", default=None) + timestamp_ms: int = Field(alias="timestamp-ms") + manifest_list: Optional[str] = Field( + alias="manifest-list", description="Location of the snapshot's manifest list file", default=None + ) + summary: Optional[Summary] = Field(default=None) + schema_id: Optional[int] = Field(alias="schema-id", default=None) + + def __str__(self) -> str: + """Return the string representation of the Snapshot class.""" + operation = f"{self.summary.operation}: " if self.summary else "" + parent_id = f", parent_id={self.parent_snapshot_id}" if self.parent_snapshot_id else "" + schema_id = f", schema_id={self.schema_id}" if self.schema_id is not None else "" + result_str = f"{operation}id={self.snapshot_id}{parent_id}{schema_id}" + return result_str + + def manifests(self, io: FileIO) -> List[ManifestFile]: + if self.manifest_list is not None: + file = io.new_input(self.manifest_list) + return list(read_manifest_list(file)) + return [] + + +class MetadataLogEntry(IcebergBaseModel): + metadata_file: str = Field(alias="metadata-file") + timestamp_ms: int = Field(alias="timestamp-ms") + + +class SnapshotLogEntry(IcebergBaseModel): + snapshot_id: int = Field(alias="snapshot-id") + timestamp_ms: int = Field(alias="timestamp-ms") diff --git a/pyiceberg/table/sorting.py b/pyiceberg/table/sorting.py new file mode 100644 index 0000000000..3a97e39884 --- /dev/null +++ b/pyiceberg/table/sorting.py @@ -0,0 +1,192 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=keyword-arg-before-vararg +from enum import Enum +from typing import ( + Any, + Callable, + Dict, + List, + Optional, + Union, +) + +from pydantic import ( + BeforeValidator, + Field, + PlainSerializer, + WithJsonSchema, + model_validator, +) +from typing_extensions import Annotated + +from pyiceberg.schema import Schema +from pyiceberg.transforms import IdentityTransform, Transform, parse_transform +from pyiceberg.typedef import IcebergBaseModel +from pyiceberg.types import IcebergType + + +class SortDirection(Enum): + ASC = "asc" + DESC = "desc" + + def __str__(self) -> str: + """Return the string representation of the SortDirection class.""" + return self.name + + def __repr__(self) -> str: + """Return the string representation of the SortDirection class.""" + return f"SortDirection.{self.name}" + + +class NullOrder(Enum): + NULLS_FIRST = "nulls-first" + NULLS_LAST = "nulls-last" + + def __str__(self) -> str: + """Return the string representation of the NullOrder class.""" + return self.name.replace("_", " ") + + def __repr__(self) -> str: + """Return the string representation of the NullOrder class.""" + return f"NullOrder.{self.name}" + + +class SortField(IcebergBaseModel): + """Sort order field. + + Args: + source_id (int): Source column id from the table’s schema. + transform (str): Transform that is used to produce values to be sorted on from the source column. + This is the same transform as described in partition transforms. + direction (SortDirection): Sort direction, that can only be either asc or desc. + null_order (NullOrder): Null order that describes the order of null values when sorted. Can only be either nulls-first or nulls-last. 
+ """ + + def __init__( + self, + source_id: Optional[int] = None, + transform: Optional[Union[Transform[Any, Any], Callable[[IcebergType], Transform[Any, Any]]]] = None, + direction: Optional[SortDirection] = None, + null_order: Optional[NullOrder] = None, + **data: Any, + ): + if source_id is not None: + data["source-id"] = source_id + if transform is not None: + data["transform"] = transform + if direction is not None: + data["direction"] = direction + if null_order is not None: + data["null-order"] = null_order + super().__init__(**data) + + @model_validator(mode="before") + def set_null_order(cls, values: Dict[str, Any]) -> Dict[str, Any]: + values["direction"] = values["direction"] if values.get("direction") else SortDirection.ASC + if not values.get("null-order"): + values["null-order"] = NullOrder.NULLS_FIRST if values["direction"] == SortDirection.ASC else NullOrder.NULLS_LAST + return values + + source_id: int = Field(alias="source-id") + transform: Annotated[ # type: ignore + Transform, + BeforeValidator(parse_transform), + PlainSerializer(lambda c: str(c), return_type=str), # pylint: disable=W0108 + WithJsonSchema({"type": "string"}, mode="serialization"), + ] = Field() + direction: SortDirection = Field() + null_order: NullOrder = Field(alias="null-order") + + def __str__(self) -> str: + """Return the string representation of the SortField class.""" + if type(self.transform) == IdentityTransform: + # In the case of an identity transform, we can omit the transform + return f"{self.source_id} {self.direction} {self.null_order}" + else: + return f"{self.transform}({self.source_id}) {self.direction} {self.null_order}" + + +INITIAL_SORT_ORDER_ID = 1 + + +class SortOrder(IcebergBaseModel): + """Describes how the data is sorted within the table. + + Users can sort their data within partitions by columns to gain performance. + + The order of the sort fields within the list defines the order in which the sort is applied to the data. + + Args: + fields (List[SortField]): The fields how the table is sorted. + + Keyword Args: + order_id (int): An unique id of the sort-order of a table. 
+ """ + + order_id: int = Field(alias="order-id", default=INITIAL_SORT_ORDER_ID) + fields: List[SortField] = Field(default_factory=list) + + def __init__(self, *fields: SortField, **data: Any): + if fields: + data["fields"] = fields + super().__init__(**data) + + @property + def is_unsorted(self) -> bool: + return len(self.fields) == 0 + + def __str__(self) -> str: + """Return the string representation of the SortOrder class.""" + result_str = "[" + if self.fields: + result_str += "\n " + "\n ".join([str(field) for field in self.fields]) + "\n" + result_str += "]" + return result_str + + def __repr__(self) -> str: + """Return the string representation of the SortOrder class.""" + fields = f"{', '.join(repr(column) for column in self.fields)}, " if self.fields else "" + return f"SortOrder({fields}order_id={self.order_id})" + + +UNSORTED_SORT_ORDER_ID = 0 +UNSORTED_SORT_ORDER = SortOrder(order_id=UNSORTED_SORT_ORDER_ID) + + +def assign_fresh_sort_order_ids(sort_order: SortOrder, old_schema: Schema, fresh_schema: Schema) -> SortOrder: + if sort_order.is_unsorted: + return UNSORTED_SORT_ORDER + + fresh_fields = [] + for field in sort_order.fields: + original_field = old_schema.find_column_name(field.source_id) + if original_field is None: + raise ValueError(f"Could not find in old schema: {field}") + fresh_field = fresh_schema.find_field(original_field) + if fresh_field is None: + raise ValueError(f"Could not find field in fresh schema: {original_field}") + fresh_fields.append( + SortField( + source_id=fresh_field.field_id, + transform=field.transform, + direction=field.direction, + null_order=field.null_order, + ) + ) + + return SortOrder(*fresh_fields, order_id=INITIAL_SORT_ORDER_ID) diff --git a/pyiceberg/transforms.py b/pyiceberg/transforms.py new file mode 100644 index 0000000000..9cda219099 --- /dev/null +++ b/pyiceberg/transforms.py @@ -0,0 +1,840 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+
+import base64
+import struct
+from abc import ABC, abstractmethod
+from enum import IntEnum
+from functools import singledispatch
+from typing import Any, Callable, Generic, Optional, TypeVar
+from typing import Literal as LiteralType
+from uuid import UUID
+
+import mmh3
+from pydantic import Field, PositiveInt, PrivateAttr
+
+from pyiceberg.expressions import (
+    BoundEqualTo,
+    BoundGreaterThan,
+    BoundGreaterThanOrEqual,
+    BoundIn,
+    BoundLessThan,
+    BoundLessThanOrEqual,
+    BoundLiteralPredicate,
+    BoundNotIn,
+    BoundNotStartsWith,
+    BoundPredicate,
+    BoundSetPredicate,
+    BoundStartsWith,
+    BoundTerm,
+    BoundUnaryPredicate,
+    EqualTo,
+    GreaterThanOrEqual,
+    LessThanOrEqual,
+    NotStartsWith,
+    Reference,
+    StartsWith,
+    UnboundPredicate,
+)
+from pyiceberg.expressions.literals import (
+    DateLiteral,
+    DecimalLiteral,
+    Literal,
+    LongLiteral,
+    TimestampLiteral,
+    literal,
+)
+from pyiceberg.typedef import IcebergRootModel, L
+from pyiceberg.types import (
+    BinaryType,
+    DateType,
+    DecimalType,
+    FixedType,
+    IcebergType,
+    IntegerType,
+    LongType,
+    StringType,
+    TimestampType,
+    TimestamptzType,
+    TimeType,
+    UUIDType,
+)
+from pyiceberg.utils import datetime
+from pyiceberg.utils.decimal import decimal_to_bytes, truncate_decimal
+from pyiceberg.utils.parsing import ParseNumberFromBrackets
+from pyiceberg.utils.singleton import Singleton
+
+S = TypeVar("S")
+T = TypeVar("T")
+
+IDENTITY = "identity"
+VOID = "void"
+BUCKET = "bucket"
+TRUNCATE = "truncate"
+YEAR = "year"
+MONTH = "month"
+DAY = "day"
+HOUR = "hour"
+
+BUCKET_PARSER = ParseNumberFromBrackets(BUCKET)
+TRUNCATE_PARSER = ParseNumberFromBrackets(TRUNCATE)
+
+
+def _transform_literal(func: Callable[[L], L], lit: Literal[L]) -> Literal[L]:
+    """Small helper to unwrap the value from the literal, and wrap it again."""
+    return literal(func(lit.value))
+
+
+def parse_transform(v: Any) -> Any:
+    if isinstance(v, str):
+        if v == IDENTITY:
+            return IdentityTransform()
+        elif v == VOID:
+            return VoidTransform()
+        elif v.startswith(BUCKET):
+            return BucketTransform(num_buckets=BUCKET_PARSER.match(v))
+        elif v.startswith(TRUNCATE):
+            return TruncateTransform(width=TRUNCATE_PARSER.match(v))
+        elif v == YEAR:
+            return YearTransform()
+        elif v == MONTH:
+            return MonthTransform()
+        elif v == DAY:
+            return DayTransform()
+        elif v == HOUR:
+            return HourTransform()
+        else:
+            return UnknownTransform(transform=v)
+    return v
+
+
+class Transform(IcebergRootModel[str], ABC, Generic[S, T]):
+    """Transform base class for concrete transforms.
+
+    A base class to transform values and project predicates on partition values.
+    This class is not used directly. Instead, use one of the module methods to create the child classes.
+    """
+
+    root: str = Field()
+
+    @abstractmethod
+    def transform(self, source: IcebergType) -> Callable[[Optional[S]], Optional[T]]:
+        ...
+
+    @abstractmethod
+    def can_transform(self, source: IcebergType) -> bool:
+        return False
+
+    @abstractmethod
+    def result_type(self, source: IcebergType) -> IcebergType:
+        ...
+
+    @abstractmethod
+    def project(self, name: str, pred: BoundPredicate[L]) -> Optional[UnboundPredicate[Any]]:
+        ...
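+    # A rough sketch of the contract (comment only): a concrete transform such as
+    # BucketTransform(8) answers can_transform(IntegerType()) with True, maps the
+    # partition field type via result_type (IntegerType for buckets), and returns
+    # a value-level callable from transform, so
+    # BucketTransform(8).transform(IntegerType())(34) yields a bucket in range(8).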
+
+    @property
+    def preserves_order(self) -> bool:
+        return False
+
+    def satisfies_order_of(self, other: Any) -> bool:
+        return self == other
+
+    def to_human_string(self, _: IcebergType, value: Optional[S]) -> str:
+        return str(value) if value is not None else "null"
+
+    @property
+    def dedup_name(self) -> str:
+        return self.__str__()
+
+    def __str__(self) -> str:
+        """Return the string representation of the Transform class."""
+        return self.root
+
+    def __eq__(self, other: Any) -> bool:
+        """Return the equality of two instances of the Transform class."""
+        if isinstance(other, Transform):
+            return self.root == other.root
+        return False
+
+
+class BucketTransform(Transform[S, int]):
+    """Base Transform class to transform a value into a bucket partition value.
+
+    Transforms are parameterized by a number of buckets. Bucket partition transforms compute a 32-bit
+    hash of the source value and take that hash modulo the number of buckets to produce a positive value.
+
+    Args:
+        num_buckets (int): The number of buckets.
+    """
+
+    root: str = Field()
+    _num_buckets: PositiveInt = PrivateAttr()
+
+    def __init__(self, num_buckets: int, **data: Any) -> None:
+        self._num_buckets = num_buckets
+        super().__init__(f"bucket[{num_buckets}]", **data)
+
+    @property
+    def num_buckets(self) -> int:
+        return self._num_buckets
+
+    def hash(self, value: S) -> int:
+        raise NotImplementedError()
+
+    def apply(self, value: Optional[S]) -> Optional[int]:
+        return (self.hash(value) & IntegerType.max) % self._num_buckets if value else None
+
+    def result_type(self, source: IcebergType) -> IcebergType:
+        return IntegerType()
+
+    def project(self, name: str, pred: BoundPredicate[L]) -> Optional[UnboundPredicate[Any]]:
+        transformer = self.transform(pred.term.ref().field.field_type)
+
+        if isinstance(pred.term, BoundTransform):
+            return _project_transform_predicate(self, name, pred)
+        elif isinstance(pred, BoundUnaryPredicate):
+            return pred.as_unbound(Reference(name))
+        elif isinstance(pred, BoundEqualTo):
+            return pred.as_unbound(Reference(name), _transform_literal(transformer, pred.literal))
+        elif isinstance(pred, BoundIn):  # NotIn can't be projected
+            return pred.as_unbound(Reference(name), {_transform_literal(transformer, literal) for literal in pred.literals})
+        else:
+            # - Comparison predicates can't be projected, notEq can't be projected
+            # - Small ranges can be projected:
+            #   For example, (x > 0) and (x < 3) can be turned into in({1, 2}) and projected.
+            return None
+
+    def can_transform(self, source: IcebergType) -> bool:
+        return type(source) in {
+            IntegerType,
+            DateType,
+            LongType,
+            TimeType,
+            TimestampType,
+            TimestamptzType,
+            DecimalType,
+            StringType,
+            FixedType,
+            BinaryType,
+            UUIDType,
+        }
+
+    def transform(self, source: IcebergType, bucket: bool = True) -> Callable[[Optional[Any]], Optional[int]]:
+        source_type = type(source)
+        if source_type in {IntegerType, LongType, DateType, TimeType, TimestampType, TimestamptzType}:
+
+            def hash_func(v: Any) -> int:
+                return mmh3.hash(struct.pack("<q", v))
+
+        elif source_type == DecimalType:
+
+            def hash_func(v: Any) -> int:
+                return mmh3.hash(decimal_to_bytes(v))
+
+        elif source_type in {StringType, FixedType, BinaryType}:
+
+            def hash_func(v: Any) -> int:
+                return mmh3.hash(v)
+
+        elif source_type == UUIDType:
+
+            def hash_func(v: Any) -> int:
+                if isinstance(v, UUID):
+                    return mmh3.hash(v.bytes)
+                return mmh3.hash(v)
+
+        else:
+            raise ValueError(f"Unknown type {source}")
+
+        if bucket:
+            return lambda v: (hash_func(v) & IntegerType.max) % self._num_buckets if v else None
+        return hash_func
+
+    def __repr__(self) -> str:
+        """Return the string representation of the BucketTransform class."""
+        return f"BucketTransform(num_buckets={self._num_buckets})"
+
+
+class TimeResolution(IntEnum):
+    YEAR = 6
+    MONTH = 5
+    WEEK = 4
+    DAY = 3
+    HOUR = 2
+    MINUTE = 1
+    SECOND = 0
+
+
+class TimeTransform(Transform[S, int], Generic[S], Singleton):
+    @property
+    @abstractmethod
+    def granularity(self) -> TimeResolution:
+        ...
+
+    def satisfies_order_of(self, other: Transform[S, T]) -> bool:
+        return self.granularity <= other.granularity if hasattr(other, "granularity") else False
+
+    def result_type(self, source: IcebergType) -> IntegerType:
+        return IntegerType()
+
+    @abstractmethod
+    def transform(self, source: IcebergType) -> Callable[[Optional[Any]], Optional[int]]:
+        ...
+
+    def project(self, name: str, pred: BoundPredicate[L]) -> Optional[UnboundPredicate[Any]]:
+        transformer = self.transform(pred.term.ref().field.field_type)
+        if isinstance(pred.term, BoundTransform):
+            return _project_transform_predicate(self, name, pred)
+        elif isinstance(pred, BoundUnaryPredicate):
+            return pred.as_unbound(Reference(name))
+        elif isinstance(pred, BoundLiteralPredicate):
+            return _truncate_number(name, pred, transformer)
+        elif isinstance(pred, BoundIn):  # NotIn can't be projected
+            return _set_apply_transform(name, pred, transformer)
+        else:
+            return None
+
+    @property
+    def dedup_name(self) -> str:
+        return "time"
+
+    @property
+    def preserves_order(self) -> bool:
+        return True
+
+
+class YearTransform(TimeTransform[S]):
+    """Transforms a datetime value into a year value.
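+
+    The value is the number of years from the Unix epoch (1970-01-01), which is
+    why the 2017 timestamp in the example below maps to 47.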
+ + Example: + >>> transform = YearTransform() + >>> transform.transform(TimestampType())(1512151975038194) + 47 + """ + + root: LiteralType["year"] = Field(default="year") # noqa: F821 + + def transform(self, source: IcebergType) -> Callable[[Optional[S]], Optional[int]]: + source_type = type(source) + if source_type == DateType: + + def year_func(v: Any) -> int: + return datetime.days_to_years(v) + + elif source_type in {TimestampType, TimestamptzType}: + + def year_func(v: Any) -> int: + return datetime.micros_to_years(v) + + else: + raise ValueError(f"Cannot apply year transform for type: {source}") + + return lambda v: year_func(v) if v is not None else None + + def can_transform(self, source: IcebergType) -> bool: + return type(source) in { + DateType, + TimestampType, + TimestamptzType, + } + + @property + def granularity(self) -> TimeResolution: + return TimeResolution.YEAR + + def to_human_string(self, _: IcebergType, value: Optional[S]) -> str: + return datetime.to_human_year(value) if isinstance(value, int) else "null" + + def __repr__(self) -> str: + """Return the string representation of the YearTransform class.""" + return "YearTransform()" + + +class MonthTransform(TimeTransform[S]): + """Transforms a datetime value into a month value. + + Example: + >>> transform = MonthTransform() + >>> transform.transform(DateType())(17501) + 575 + """ + + root: LiteralType["month"] = Field(default="month") # noqa: F821 + + def transform(self, source: IcebergType) -> Callable[[Optional[S]], Optional[int]]: + source_type = type(source) + if source_type == DateType: + + def month_func(v: Any) -> int: + return datetime.days_to_months(v) + + elif source_type in {TimestampType, TimestamptzType}: + + def month_func(v: Any) -> int: + return datetime.micros_to_months(v) + + else: + raise ValueError(f"Cannot apply month transform for type: {source}") + + return lambda v: month_func(v) if v else None + + def can_transform(self, source: IcebergType) -> bool: + return type(source) in { + DateType, + TimestampType, + TimestamptzType, + } + + @property + def granularity(self) -> TimeResolution: + return TimeResolution.MONTH + + def to_human_string(self, _: IcebergType, value: Optional[S]) -> str: + return datetime.to_human_month(value) if isinstance(value, int) else "null" + + def __repr__(self) -> str: + """Return the string representation of the MonthTransform class.""" + return "MonthTransform()" + + +class DayTransform(TimeTransform[S]): + """Transforms a datetime value into a day value. 
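+
+    The value is the number of days from the Unix epoch (1970-01-01); a DateType
+    value is already stored as that ordinal, which is why the example below
+    returns its input unchanged.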
+
+    Example:
+        >>> transform = DayTransform()
+        >>> transform.transform(DateType())(17501)
+        17501
+    """
+
+    root: LiteralType["day"] = Field(default="day")  # noqa: F821
+
+    def transform(self, source: IcebergType) -> Callable[[Optional[S]], Optional[int]]:
+        source_type = type(source)
+        if source_type == DateType:
+
+            def day_func(v: Any) -> int:
+                return v
+
+        elif source_type in {TimestampType, TimestamptzType}:
+
+            def day_func(v: Any) -> int:
+                return datetime.micros_to_days(v)
+
+        else:
+            raise ValueError(f"Cannot apply day transform for type: {source}")
+
+        return lambda v: day_func(v) if v else None
+
+    def can_transform(self, source: IcebergType) -> bool:
+        return type(source) in {
+            DateType,
+            TimestampType,
+            TimestamptzType,
+        }
+
+    def result_type(self, source: IcebergType) -> IcebergType:
+        return DateType()
+
+    @property
+    def granularity(self) -> TimeResolution:
+        return TimeResolution.DAY
+
+    def to_human_string(self, _: IcebergType, value: Optional[S]) -> str:
+        return datetime.to_human_day(value) if isinstance(value, int) else "null"
+
+    def __repr__(self) -> str:
+        """Return the string representation of the DayTransform class."""
+        return "DayTransform()"
+
+
+class HourTransform(TimeTransform[S]):
+    """Transforms a datetime value into an hour value.
+
+    Example:
+        >>> transform = HourTransform()
+        >>> transform.transform(TimestampType())(1512151975038194)
+        420042
+    """
+
+    root: LiteralType["hour"] = Field(default="hour")  # noqa: F821
+
+    def transform(self, source: IcebergType) -> Callable[[Optional[S]], Optional[int]]:
+        if type(source) in {TimestampType, TimestamptzType}:
+
+            def hour_func(v: Any) -> int:
+                return datetime.micros_to_hours(v)
+
+        else:
+            raise ValueError(f"Cannot apply hour transform for type: {source}")
+
+        return lambda v: hour_func(v) if v else None
+
+    def can_transform(self, source: IcebergType) -> bool:
+        return type(source) in {
+            TimestampType,
+            TimestamptzType,
+        }
+
+    @property
+    def granularity(self) -> TimeResolution:
+        return TimeResolution.HOUR
+
+    def to_human_string(self, _: IcebergType, value: Optional[S]) -> str:
+        return datetime.to_human_hour(value) if isinstance(value, int) else "null"
+
+    def __repr__(self) -> str:
+        """Return the string representation of the HourTransform class."""
+        return "HourTransform()"
+
+
+def _base64encode(buffer: bytes) -> str:
+    """Convert bytes to base64 string."""
+    return base64.b64encode(buffer).decode("ISO-8859-1")
+
+
+class IdentityTransform(Transform[S, S]):
+    """Transforms a value into itself.
+ + Example: + >>> transform = IdentityTransform() + >>> transform.transform(StringType())('hello-world') + 'hello-world' + """ + + root: LiteralType["identity"] = Field(default="identity") # noqa: F821 + + def __init__(self) -> None: + super().__init__("identity") + + def transform(self, source: IcebergType) -> Callable[[Optional[S]], Optional[S]]: + return lambda v: v + + def can_transform(self, source: IcebergType) -> bool: + return source.is_primitive + + def result_type(self, source: IcebergType) -> IcebergType: + return source + + def project(self, name: str, pred: BoundPredicate[L]) -> Optional[UnboundPredicate[Any]]: + if isinstance(pred.term, BoundTransform): + return _project_transform_predicate(self, name, pred) + elif isinstance(pred, BoundUnaryPredicate): + return pred.as_unbound(Reference(name)) + elif isinstance(pred, BoundLiteralPredicate): + return pred.as_unbound(Reference(name), pred.literal) + elif isinstance(pred, (BoundIn, BoundNotIn)): + return pred.as_unbound(Reference(name), pred.literals) + else: + raise ValueError(f"Could not project: {pred}") + + @property + def preserves_order(self) -> bool: + return True + + def satisfies_order_of(self, other: Transform[S, T]) -> bool: + """Ordering by value is the same as long as the other preserves order.""" + return other.preserves_order + + def to_human_string(self, source_type: IcebergType, value: Optional[S]) -> str: + return _human_string(value, source_type) if value is not None else "null" + + def __str__(self) -> str: + """Return the string representation of the IdentityTransform class.""" + return "identity" + + def __repr__(self) -> str: + """Return the string representation of the IdentityTransform class.""" + return "IdentityTransform()" + + +class TruncateTransform(Transform[S, S]): + """A transform for truncating a value to a specified width. + + Args: + width (int): The truncate width, should be positive. + Raises: + ValueError: If a type is provided that is incompatible with a Truncate transform. 
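+
+    Example (illustrative; integers truncate down to a multiple of the width,
+    strings truncate to a prefix):
+        >>> TruncateTransform(10).transform(IntegerType())(125)
+        120
+        >>> TruncateTransform(3).transform(StringType())('iceberg')
+        'ice'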
+ """ + + root: str = Field() + _source_type: IcebergType = PrivateAttr() + _width: PositiveInt = PrivateAttr() + + def __init__(self, width: int, **data: Any): + super().__init__(root=f"truncate[{width}]", **data) + self._width = width + + def can_transform(self, source: IcebergType) -> bool: + return type(source) in {IntegerType, LongType, StringType, BinaryType, DecimalType} + + def result_type(self, source: IcebergType) -> IcebergType: + return source + + @property + def preserves_order(self) -> bool: + return True + + @property + def source_type(self) -> IcebergType: + return self._source_type + + def project(self, name: str, pred: BoundPredicate[L]) -> Optional[UnboundPredicate[Any]]: + field_type = pred.term.ref().field.field_type + + if isinstance(pred.term, BoundTransform): + return _project_transform_predicate(self, name, pred) + + if isinstance(pred, BoundUnaryPredicate): + return pred.as_unbound(Reference(name)) + elif isinstance(pred, BoundIn): + return _set_apply_transform(name, pred, self.transform(field_type)) + elif isinstance(field_type, (IntegerType, LongType, DecimalType)): + if isinstance(pred, BoundLiteralPredicate): + return _truncate_number(name, pred, self.transform(field_type)) + elif isinstance(field_type, (BinaryType, StringType)): + if isinstance(pred, BoundLiteralPredicate): + return _truncate_array(name, pred, self.transform(field_type)) + return None + + @property + def width(self) -> int: + return self._width + + def transform(self, source: IcebergType) -> Callable[[Optional[S]], Optional[S]]: + source_type = type(source) + if source_type in {IntegerType, LongType}: + + def truncate_func(v: Any) -> Any: + return v - v % self._width + + elif source_type in {StringType, BinaryType}: + + def truncate_func(v: Any) -> Any: + return v[0 : min(self._width, len(v))] + + elif source_type == DecimalType: + + def truncate_func(v: Any) -> Any: + return truncate_decimal(v, self._width) + + else: + raise ValueError(f"Cannot truncate for type: {source}") + + return lambda v: truncate_func(v) if v else None + + def satisfies_order_of(self, other: Transform[S, T]) -> bool: + if self == other: + return True + elif ( + isinstance(self.source_type, StringType) + and isinstance(other, TruncateTransform) + and isinstance(other.source_type, StringType) + ): + return self.width >= other.width + + return False + + def to_human_string(self, _: IcebergType, value: Optional[S]) -> str: + if value is None: + return "null" + elif isinstance(value, bytes): + return _base64encode(value) + else: + return str(value) + + def __repr__(self) -> str: + """Return the string representation of the TruncateTransform class.""" + return f"TruncateTransform(width={self._width})" + + +@singledispatch +def _human_string(value: Any, _type: IcebergType) -> str: + return str(value) + + +@_human_string.register(bytes) +def _(value: bytes, _type: IcebergType) -> str: + return _base64encode(value) + + +@_human_string.register(int) +def _(value: int, _type: IcebergType) -> str: + return _int_to_human_string(_type, value) + + +@singledispatch +def _int_to_human_string(_type: IcebergType, value: int) -> str: + return str(value) + + +@_int_to_human_string.register(DateType) +def _(_type: IcebergType, value: int) -> str: + return datetime.to_human_day(value) + + +@_int_to_human_string.register(TimeType) +def _(_type: IcebergType, value: int) -> str: + return datetime.to_human_time(value) + + +@_int_to_human_string.register(TimestampType) +def _(_type: IcebergType, value: int) -> str: + return 
datetime.to_human_timestamp(value) + + +@_int_to_human_string.register(TimestamptzType) +def _(_type: IcebergType, value: int) -> str: + return datetime.to_human_timestamptz(value) + + +class UnknownTransform(Transform[S, T]): + """A transform that represents when an unknown transform is provided. + + Args: + transform (str): A string name of a transform. + + Keyword Args: + source_type (IcebergType): An Iceberg `Type`. + """ + + root: LiteralType["unknown"] = Field(default="unknown") # noqa: F821 + _transform: str = PrivateAttr() + + def __init__(self, transform: str, **data: Any): + super().__init__(**data) + self._transform = transform + + def transform(self, source: IcebergType) -> Callable[[Optional[S]], Optional[T]]: + raise AttributeError(f"Cannot apply unsupported transform: {self}") + + def can_transform(self, source: IcebergType) -> bool: + return False + + def result_type(self, source: IcebergType) -> StringType: + return StringType() + + def project(self, name: str, pred: BoundPredicate[L]) -> Optional[UnboundPredicate[Any]]: + return None + + def __repr__(self) -> str: + """Return the string representation of the UnknownTransform class.""" + return f"UnknownTransform(transform={repr(self._transform)})" + + +class VoidTransform(Transform[S, None], Singleton): + """A transform that always returns None.""" + + root: str = "void" + + def transform(self, source: IcebergType) -> Callable[[Optional[S]], Optional[T]]: + return lambda v: None + + def can_transform(self, _: IcebergType) -> bool: + return True + + def result_type(self, source: IcebergType) -> IcebergType: + return source + + def project(self, name: str, pred: BoundPredicate[L]) -> Optional[UnboundPredicate[Any]]: + return None + + def to_human_string(self, _: IcebergType, value: Optional[S]) -> str: + return "null" + + def __repr__(self) -> str: + """Return the string representation of the VoidTransform class.""" + return "VoidTransform()" + + +def _truncate_number( + name: str, pred: BoundLiteralPredicate[L], transform: Callable[[Optional[L]], Optional[L]] +) -> Optional[UnboundPredicate[Any]]: + boundary = pred.literal + + if not isinstance(boundary, (LongLiteral, DecimalLiteral, DateLiteral, TimestampLiteral)): + raise ValueError(f"Expected a numeric literal, got: {type(boundary)}") + + if isinstance(pred, BoundLessThan): + return LessThanOrEqual(Reference(name), _transform_literal(transform, boundary.decrement())) # type: ignore + elif isinstance(pred, BoundLessThanOrEqual): + return LessThanOrEqual(Reference(name), _transform_literal(transform, boundary)) + elif isinstance(pred, BoundGreaterThan): + return GreaterThanOrEqual(Reference(name), _transform_literal(transform, boundary.increment())) # type: ignore + elif isinstance(pred, BoundGreaterThanOrEqual): + return GreaterThanOrEqual(Reference(name), _transform_literal(transform, boundary)) + elif isinstance(pred, BoundEqualTo): + return EqualTo(Reference(name), _transform_literal(transform, boundary)) + else: + return None + + +def _truncate_array( + name: str, pred: BoundLiteralPredicate[L], transform: Callable[[Optional[L]], Optional[L]] +) -> Optional[UnboundPredicate[Any]]: + boundary = pred.literal + + if type(pred) in {BoundLessThan, BoundLessThanOrEqual}: + return LessThanOrEqual(Reference(name), _transform_literal(transform, boundary)) + elif type(pred) in {BoundGreaterThan, BoundGreaterThanOrEqual}: + return GreaterThanOrEqual(Reference(name), _transform_literal(transform, boundary)) + if isinstance(pred, BoundEqualTo): + return EqualTo(Reference(name), 
_transform_literal(transform, boundary)) + elif isinstance(pred, BoundStartsWith): + return StartsWith(Reference(name), _transform_literal(transform, boundary)) + elif isinstance(pred, BoundNotStartsWith): + return NotStartsWith(Reference(name), _transform_literal(transform, boundary)) + else: + return None + + +def _project_transform_predicate( + transform: Transform[Any, Any], partition_name: str, pred: BoundPredicate[L] +) -> Optional[UnboundPredicate[Any]]: + term = pred.term + if isinstance(term, BoundTransform) and transform == term.transform: + return _remove_transform(partition_name, pred) + return None + + +def _remove_transform(partition_name: str, pred: BoundPredicate[L]) -> UnboundPredicate[Any]: + if isinstance(pred, BoundUnaryPredicate): + return pred.as_unbound(Reference(partition_name)) + elif isinstance(pred, BoundLiteralPredicate): + return pred.as_unbound(Reference(partition_name), pred.literal) + elif isinstance(pred, (BoundIn, BoundNotIn)): + return pred.as_unbound(Reference(partition_name), pred.literals) + else: + raise ValueError(f"Cannot replace transform in unknown predicate: {pred}") + + +def _set_apply_transform(name: str, pred: BoundSetPredicate[L], transform: Callable[[L], L]) -> UnboundPredicate[Any]: + literals = pred.literals + if isinstance(pred, BoundSetPredicate): + return pred.as_unbound(Reference(name), {_transform_literal(transform, literal) for literal in literals}) + else: + raise ValueError(f"Unknown BoundSetPredicate: {pred}") + + +class BoundTransform(BoundTerm[L]): + """A transform expression.""" + + transform: Transform[L, Any] + + def __init__(self, term: BoundTerm[L], transform: Transform[L, Any]): + self.term: BoundTerm[L] = term + self.transform = transform diff --git a/pyiceberg/typedef.py b/pyiceberg/typedef.py new file mode 100644 index 0000000000..ff2a6d1cb0 --- /dev/null +++ b/pyiceberg/typedef.py @@ -0,0 +1,199 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
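+# An illustrative note (comments only, not executed): the Record class defined
+# below is a positional struct; without an explicit StructType or named fields,
+# positions get generated names, e.g.
+#
+#     >>> r = Record(1, "a")
+#     >>> (r[0], r.field2)
+#     (1, 'a')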
+from __future__ import annotations
+
+from abc import abstractmethod
+from decimal import Decimal
+from functools import lru_cache
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    Generic,
+    List,
+    Optional,
+    Protocol,
+    Set,
+    Tuple,
+    TypeVar,
+    Union,
+    runtime_checkable,
+)
+from uuid import UUID
+
+from pydantic import BaseModel, ConfigDict, RootModel
+
+if TYPE_CHECKING:
+    from pyiceberg.types import StructType
+
+
+class FrozenDict(Dict[Any, Any]):
+    def __setitem__(self, instance: Any, value: Any) -> None:
+        """Assign a value to a FrozenDict."""
+        raise AttributeError("FrozenDict does not support assignment")
+
+    def update(self, *args: Any, **kwargs: Any) -> None:
+        raise AttributeError("FrozenDict does not support .update()")
+
+
+EMPTY_DICT = FrozenDict()
+
+K = TypeVar("K")
+V = TypeVar("V")
+
+
+# from https://stackoverflow.com/questions/2912231/is-there-a-clever-way-to-pass-the-key-to-defaultdicts-default-factory
+class KeyDefaultDict(Dict[K, V]):
+    def __init__(self, default_factory: Callable[[K], V]):
+        super().__init__()
+        self.default_factory = default_factory
+
+    def __missing__(self, key: K) -> V:
+        """Define behavior if you access a non-existent key in a KeyDefaultDict."""
+        val = self.default_factory(key)
+        self[key] = val
+        return val
+
+
+Identifier = Tuple[str, ...]
+Properties = Dict[str, str]
+RecursiveDict = Dict[str, Union[str, "RecursiveDict"]]
+
+# Represents the literal value
+L = TypeVar("L", str, bool, int, float, bytes, UUID, Decimal, covariant=True)
+
+
+@runtime_checkable
+class StructProtocol(Protocol):  # pragma: no cover
+    """A generic protocol used by accessors to get and set at positions of an object."""
+
+    @abstractmethod
+    def __getitem__(self, pos: int) -> Any:
+        """Fetch a value from a StructProtocol."""
+
+    @abstractmethod
+    def __setitem__(self, pos: int, value: Any) -> None:
+        """Assign a value to a StructProtocol."""
+
+
+class IcebergBaseModel(BaseModel):
+    """
+    This class extends the Pydantic BaseModel to set default values by overriding them.
+
+    This is because we always want to set by_alias to True. In Python, the dash can't
+    be used in variable names, while it is used throughout the Iceberg spec.
+
+    The same goes for exclude_none: if a field is None, we want to omit it from
+    serialization, for example, the doc attribute on the NestedField object.
+    Default non-null values will be serialized.
+
+    This is recommended by Pydantic:
+    https://pydantic-docs.helpmanual.io/usage/model_config/#change-behaviour-globally
+    """
+
+    model_config = ConfigDict(populate_by_name=True, frozen=True)
+
+    def _exclude_private_properties(self, exclude: Optional[Set[str]] = None) -> Set[str]:
+        # A small trick to exclude private properties. Properties are serialized by pydantic,
+        # regardless of whether they start with an underscore.
+        # This will look at the dict, find those fields, and exclude them
+        return set.union(
+            {field for field in self.__dict__ if field.startswith("_") and not field == "__root__"}, exclude or set()
+        )
+
+    def model_dump(
+        self, exclude_none: bool = True, exclude: Optional[Set[str]] = None, by_alias: bool = True, **kwargs: Any
+    ) -> Dict[str, Any]:
+        return super().model_dump(
+            exclude_none=exclude_none, exclude=self._exclude_private_properties(exclude), by_alias=by_alias, **kwargs
+        )
+
+    def model_dump_json(
+        self, exclude_none: bool = True, exclude: Optional[Set[str]] = None, by_alias: bool = True, **kwargs: Any
+    ) -> str:
+        return super().model_dump_json(
+            exclude_none=exclude_none, exclude=self._exclude_private_properties(exclude), by_alias=by_alias, **kwargs
+        )
+
+
+T = TypeVar("T")
+
+
+class IcebergRootModel(RootModel[T], Generic[T]):
+    """
+    This class extends the Pydantic RootModel to set default values by overriding them.
+
+    This is because we always want to set by_alias to True. In Python, the dash can't
+    be used in variable names, while it is used throughout the Iceberg spec.
+
+    The same goes for exclude_none: if a field is None, we want to omit it from
+    serialization, for example, the doc attribute on the NestedField object.
+    Default non-null values will be serialized.
+
+    This is recommended by Pydantic:
+    https://pydantic-docs.helpmanual.io/usage/model_config/#change-behaviour-globally
+    """
+
+    model_config = ConfigDict(frozen=True)
+
+
+@lru_cache
+def _get_struct_fields(struct_type: StructType) -> Tuple[str, ...]:
+    return tuple([field.name for field in struct_type.fields])
+
+
+class Record(StructProtocol):
+    __slots__ = ("_position_to_field_name",)
+    _position_to_field_name: Tuple[str, ...]
+
+    def __init__(self, *data: Any, struct: Optional[StructType] = None, **named_data: Any) -> None:
+        if struct is not None:
+            self._position_to_field_name = _get_struct_fields(struct)
+        elif named_data:
+            # Order of named_data is preserved (PEP 468) so this can be used to generate the position dict
+            self._position_to_field_name = tuple(named_data.keys())
+        else:
+            self._position_to_field_name = tuple(f"field{idx + 1}" for idx in range(len(data)))
+
+        for idx, d in enumerate(data):
+            self[idx] = d
+
+        for field_name, d in named_data.items():
+            self.__setattr__(field_name, d)
+
+    def __setitem__(self, pos: int, value: Any) -> None:
+        """Assign a value to a Record."""
+        self.__setattr__(self._position_to_field_name[pos], value)
+
+    def __getitem__(self, pos: int) -> Any:
+        """Fetch a value from a Record."""
+        return self.__getattribute__(self._position_to_field_name[pos])
+
+    def __eq__(self, other: Any) -> bool:
+        """Return the equality of two instances of the Record class."""
+        if not isinstance(other, Record):
+            return False
+        return self.__dict__ == other.__dict__
+
+    def __repr__(self) -> str:
+        """Return the string representation of the Record class."""
+        return f"{self.__class__.__name__}[{', '.join(f'{key}={repr(value)}' for key, value in self.__dict__.items() if not key.startswith('_'))}]"
+
+    def record_fields(self) -> List[str]:
+        """Return the values of all the fields of the Record class, in positional order."""
+        return [self.__getattribute__(v) if hasattr(self, v) else None for v in self._position_to_field_name]
diff --git a/pyiceberg/types.py b/pyiceberg/types.py
new file mode 100644
index 0000000000..12ea831f08
--- /dev/null
+++ b/pyiceberg/types.py
@@ -0,0 +1,708 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Data types used in describing Iceberg schemas.
+
+This module implements the data types described in the Iceberg specification for Iceberg schemas. To
+describe an Iceberg table schema, these classes can be used in the construction of a StructType instance.
+
+Example:
+    >>> str(StructType(
+    ...     NestedField(1, "required_field", StringType(), True),
+    ...     NestedField(2, "optional_field", IntegerType())
+    ... ))
+    'struct<1: required_field: optional string, 2: optional_field: optional int>'
+
+Notes:
+  - https://iceberg.apache.org/#spec/#primitive-types
+"""
+from __future__ import annotations
+
+import re
+from functools import cached_property
+from typing import (
+    Any,
+    ClassVar,
+    Literal,
+    Optional,
+    Tuple,
+)
+
+from pydantic import (
+    Field,
+    PrivateAttr,
+    SerializeAsAny,
+    model_serializer,
+    model_validator,
+)
+from pydantic_core.core_schema import ValidatorFunctionWrapHandler
+
+from pyiceberg.exceptions import ValidationError
+from pyiceberg.typedef import IcebergBaseModel, IcebergRootModel
+from pyiceberg.utils.parsing import ParseNumberFromBrackets
+from pyiceberg.utils.singleton import Singleton
+
+DECIMAL_REGEX = re.compile(r"decimal\((\d+),\s*(\d+)\)")
+FIXED = "fixed"
+FIXED_PARSER = ParseNumberFromBrackets(FIXED)
+
+
+def _parse_decimal_type(decimal: Any) -> Tuple[int, int]:
+    if isinstance(decimal, str):
+        matches = DECIMAL_REGEX.search(decimal)
+        if matches:
+            return int(matches.group(1)), int(matches.group(2))
+        else:
+            raise ValidationError(f"Could not parse {decimal} into a DecimalType")
+    elif isinstance(decimal, dict):
+        return decimal["precision"], decimal["scale"]
+    else:
+        return decimal
+
+
+def _parse_fixed_type(fixed: Any) -> int:
+    if isinstance(fixed, str):
+        return FIXED_PARSER.match(fixed)
+    elif isinstance(fixed, dict):
+        return fixed["length"]
+    else:
+        return fixed
+
+
+class IcebergType(IcebergBaseModel):
+    """Base type for all Iceberg Types.
+
+    Example:
+        >>> str(IcebergType())
+        'IcebergType()'
+        >>> repr(IcebergType())
+        'IcebergType()'
+    """
+
+    @model_validator(mode="wrap")
+    @classmethod
+    def handle_primitive_type(cls, v: Any, handler: ValidatorFunctionWrapHandler) -> IcebergType:
+        # Pydantic works mostly around dicts, and there seems to be something off
+        # when not serializing into a RootModel; we might revisit this.
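+        # For example (illustrative): the string "long" becomes LongType(), and a
+        # dict such as {"type": "list", "element-id": 3, "element": "string",
+        # "element-required": True} becomes the corresponding ListType.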
+ if isinstance(v, str): + if v == "boolean": + return BooleanType() + elif v == "string": + return StringType() + elif v == "int": + return IntegerType() + elif v == "long": + return LongType() + if v == "float": + return FloatType() + if v == "double": + return DoubleType() + if v == "timestamp": + return TimestampType() + if v == "timestamptz": + return TimestamptzType() + if v == "date": + return DateType() + if v == "time": + return TimeType() + if v == "uuid": + return UUIDType() + if v == "binary": + return BinaryType() + if v.startswith("fixed"): + return FixedType(_parse_fixed_type(v)) + if v.startswith("decimal"): + precision, scale = _parse_decimal_type(v) + return DecimalType(precision, scale) + else: + raise ValueError(f"Unknown type: {v}") + if isinstance(v, dict) and cls == IcebergType: + complex_type = v.get("type") + if complex_type == "list": + return ListType(**v) + elif complex_type == "map": + return MapType(**v) + elif complex_type == "struct": + return StructType(**v) + else: + return NestedField(**v) + return handler(v) + + @property + def is_primitive(self) -> bool: + return isinstance(self, PrimitiveType) + + @property + def is_struct(self) -> bool: + return isinstance(self, StructType) + + +class PrimitiveType(IcebergRootModel[str], IcebergType, Singleton): + """Base class for all Iceberg Primitive Types.""" + + root: Any = Field() + + def __repr__(self) -> str: + """Return the string representation of the PrimitiveType class.""" + return f"{type(self).__name__}()" + + def __str__(self) -> str: + """Return the string representation of the PrimitiveType class.""" + return self.root + + +class FixedType(PrimitiveType): + """A fixed data type in Iceberg. + + Example: + >>> FixedType(8) + FixedType(length=8) + >>> FixedType(8) == FixedType(8) + True + >>> FixedType(19) == FixedType(25) + False + """ + + root: int = Field() + + def __init__(self, length: int) -> None: + super().__init__(root=length) + + @model_serializer + def ser_model(self) -> str: + return f"fixed[{self.root}]" + + def __len__(self) -> int: + """Return the length of an instance of the FixedType class.""" + return self.root + + def __str__(self) -> str: + """Return the string representation.""" + return f"fixed[{self.root}]" + + def __repr__(self) -> str: + """Return the string representation of the FixedType class.""" + return f"FixedType(length={self.root})" + + def __getnewargs__(self) -> tuple[int]: + """Pickle the FixedType class.""" + return (self.root,) + + +class DecimalType(PrimitiveType): + """A decimal data type in Iceberg. 
+
+    Example:
+        >>> DecimalType(32, 3)
+        DecimalType(precision=32, scale=3)
+        >>> DecimalType(8, 3) == DecimalType(8, 3)
+        True
+    """
+
+    root: Tuple[int, int]
+
+    def __init__(self, precision: int, scale: int) -> None:
+        super().__init__(root=(precision, scale))
+
+    @model_serializer
+    def ser_model(self) -> str:
+        """Serialize the model to a string."""
+        return f"decimal({self.precision}, {self.scale})"
+
+    @property
+    def precision(self) -> int:
+        """Return the precision of the decimal."""
+        return self.root[0]
+
+    @property
+    def scale(self) -> int:
+        """Return the scale of the decimal."""
+        return self.root[1]
+
+    def __repr__(self) -> str:
+        """Return the string representation of the DecimalType class."""
+        return f"DecimalType(precision={self.precision}, scale={self.scale})"
+
+    def __str__(self) -> str:
+        """Return the string representation."""
+        return f"decimal({self.precision}, {self.scale})"
+
+    def __hash__(self) -> int:
+        """Return the hash of the tuple."""
+        return hash(self.root)
+
+    def __getnewargs__(self) -> Tuple[int, int]:
+        """Pickle the DecimalType class."""
+        return self.precision, self.scale
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare the root to another object."""
+        return self.root == other.root if isinstance(other, DecimalType) else False
+
+
+class NestedField(IcebergType):
+    """Represents a field of a struct, a map key, a map value, or a list element.
+
+    This is where field IDs, names, docs, and nullability are tracked.
+
+    Example:
+        >>> str(NestedField(
+        ...     field_id=1,
+        ...     name='foo',
+        ...     field_type=FixedType(22),
+        ...     required=False,
+        ... ))
+        '1: foo: optional fixed[22]'
+        >>> str(NestedField(
+        ...     field_id=2,
+        ...     name='bar',
+        ...     field_type=LongType(),
+        ...     required=True,
+        ...     doc="Just a long"
+        ... ))
+        '2: bar: required long (Just a long)'
+    """
+
+    field_id: int = Field(alias="id")
+    name: str = Field()
+    field_type: SerializeAsAny[IcebergType] = Field(alias="type")
+    required: bool = Field(default=True)
+    doc: Optional[str] = Field(default=None, repr=False)
+    initial_default: Optional[Any] = Field(alias="initial-default", default=None, repr=False)
+
+    def __init__(
+        self,
+        field_id: Optional[int] = None,
+        name: Optional[str] = None,
+        field_type: Optional[IcebergType] = None,
+        required: bool = True,
+        doc: Optional[str] = None,
+        initial_default: Optional[Any] = None,
+        **data: Any,
+    ):
+        # We need an init when we want to use positional arguments, but
+        # need also to support the aliases.
+        data["id"] = data["id"] if "id" in data else field_id
+        data["name"] = name
+        data["type"] = data["type"] if "type" in data else field_type
+        data["required"] = required
+        data["doc"] = doc
+        data["initial-default"] = initial_default
+        super().__init__(**data)
+
+    def __str__(self) -> str:
+        """Return the string representation of the NestedField class."""
+        doc = "" if not self.doc else f" ({self.doc})"
+        req = "required" if self.required else "optional"
+        return f"{self.field_id}: {self.name}: {req} {self.field_type}{doc}"
+
+    def __getnewargs__(self) -> Tuple[int, str, IcebergType, bool, Optional[str]]:
+        """Pickle the NestedField class."""
+        return (self.field_id, self.name, self.field_type, self.required, self.doc)
+
+    @property
+    def optional(self) -> bool:
+        return not self.required
+
+
+class StructType(IcebergType):
+    """A struct type in Iceberg.
+
+    Example:
+        >>> str(StructType(
+        ...     NestedField(1, "required_field", StringType(), True),
+        ...     NestedField(2, "optional_field", IntegerType())
+        ... ))
+        'struct<1: required_field: optional string, 2: optional_field: optional int>'
+    """
+
+    type: Literal["struct"] = Field(default="struct")
+    fields: Tuple[NestedField, ...] = Field(default_factory=tuple)
+    _hash: int = PrivateAttr()
+
+    def __init__(self, *fields: NestedField, **data: Any):
+        # In case we use positional arguments, instead of keyword args
+        if fields:
+            data["fields"] = fields
+        super().__init__(**data)
+        self._hash = hash(self.fields)
+
+    def field(self, field_id: int) -> Optional[NestedField]:
+        for field in self.fields:
+            if field.field_id == field_id:
+                return field
+        return None
+
+    def __str__(self) -> str:
+        """Return the string representation of the StructType class."""
+        return f"struct<{', '.join(map(str, self.fields))}>"
+
+    def __repr__(self) -> str:
+        """Return the string representation of the StructType class."""
+        return f"StructType(fields=({', '.join(map(repr, self.fields))},))"
+
+    def __len__(self) -> int:
+        """Return the length of an instance of the StructType class."""
+        return len(self.fields)
+
+    def __getnewargs__(self) -> Tuple[NestedField, ...]:
+        """Pickle the StructType class."""
+        return self.fields
+
+    def __hash__(self) -> int:
+        """Use the cached hash value of the StructType class."""
+        return self._hash
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare the object if it is equal to another object."""
+        return self.fields == other.fields if isinstance(other, StructType) else False
+
+
+class ListType(IcebergType):
+    """A list type in Iceberg.
+
+    Example:
+        >>> ListType(element_id=3, element_type=StringType(), element_required=True)
+        ListType(element_id=3, element_type=StringType(), element_required=True)
+    """
+
+    type: Literal["list"] = Field(default="list")
+    element_id: int = Field(alias="element-id")
+    element_type: SerializeAsAny[IcebergType] = Field(alias="element")
+    element_required: bool = Field(alias="element-required", default=True)
+    _element_field: NestedField = PrivateAttr()
+    _hash: int = PrivateAttr()
+
+    def __init__(
+        self, element_id: Optional[int] = None, element: Optional[IcebergType] = None, element_required: bool = True, **data: Any
+    ):
+        data["element-id"] = data["element-id"] if "element-id" in data else element_id
+        data["element"] = element or data["element_type"]
+        data["element-required"] = data["element-required"] if "element-required" in data else element_required
+        super().__init__(**data)
+        # Hash the same pickle arguments as MapType does, so that equal list types hash alike
+        self._hash = hash(self.__getnewargs__())
+
+    @cached_property
+    def element_field(self) -> NestedField:
+        return NestedField(
+            name="element",
+            field_id=self.element_id,
+            field_type=self.element_type,
+            required=self.element_required,
+        )
+
+    def __str__(self) -> str:
+        """Return the string representation of the ListType class."""
+        return f"list<{self.element_type}>"
+
+    def __getnewargs__(self) -> Tuple[int, IcebergType, bool]:
+        """Pickle the ListType class."""
+        return (self.element_id, self.element_type, self.element_required)
+
+    def __hash__(self) -> int:
+        """Use the cached hash value of the ListType class."""
+        return self._hash
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare the list type to another list type."""
+        return self.element_field == other.element_field if isinstance(other, ListType) else False
+
+
+class MapType(IcebergType):
+    """A map type in Iceberg.
+ + Example: + >>> MapType(key_id=1, key_type=StringType(), value_id=2, value_type=IntegerType(), value_required=True) + MapType(key_id=1, key_type=StringType(), value_id=2, value_type=IntegerType(), value_required=True) + """ + + type: Literal["map"] = Field(default="map") + key_id: int = Field(alias="key-id") + key_type: SerializeAsAny[IcebergType] = Field(alias="key") + value_id: int = Field(alias="value-id") + value_type: SerializeAsAny[IcebergType] = Field(alias="value") + value_required: bool = Field(alias="value-required", default=True) + _hash: int = PrivateAttr() + + def __init__( + self, + key_id: Optional[int] = None, + key_type: Optional[IcebergType] = None, + value_id: Optional[int] = None, + value_type: Optional[IcebergType] = None, + value_required: bool = True, + **data: Any, + ): + data["key-id"] = data["key-id"] if "key-id" in data else key_id + data["key"] = data["key"] if "key" in data else key_type + data["value-id"] = data["value-id"] if "value-id" in data else value_id + data["value"] = data["value"] if "value" in data else value_type + data["value-required"] = data["value-required"] if "value-required" in data else value_required + super().__init__(**data) + self._hash = hash(self.__getnewargs__()) + + @cached_property + def key_field(self) -> NestedField: + return NestedField( + name="key", + field_id=self.key_id, + field_type=self.key_type, + required=True, + ) + + @cached_property + def value_field(self) -> NestedField: + return NestedField( + name="value", + field_id=self.value_id, + field_type=self.value_type, + required=self.value_required, + ) + + def __str__(self) -> str: + """Return the string representation of the MapType class.""" + return f"map<{self.key_type}, {self.value_type}>" + + def __getnewargs__(self) -> Tuple[int, IcebergType, int, IcebergType, bool]: + """Pickle the MapType class.""" + return (self.key_id, self.key_type, self.value_id, self.value_type, self.value_required) + + def __hash__(self) -> int: + """Return the hash of the MapType.""" + return self._hash + + def __eq__(self, other: Any) -> bool: + """Compare the MapType to another object.""" + return ( + self.key_field == other.key_field and self.value_field == other.value_field if isinstance(other, MapType) else False + ) + + +class BooleanType(PrimitiveType): + """A boolean data type in Iceberg can be represented using an instance of this class. + + Example: + >>> column_foo = BooleanType() + >>> isinstance(column_foo, BooleanType) + True + >>> column_foo + BooleanType() + """ + + root: Literal["boolean"] = Field(default="boolean") + + +class IntegerType(PrimitiveType): + """An Integer data type in Iceberg can be represented using an instance of this class. + + Integers in Iceberg are 32-bit signed and can be promoted to Longs. + + Example: + >>> column_foo = IntegerType() + >>> isinstance(column_foo, IntegerType) + True + + Attributes: + max (int): The maximum allowed value for Integers, inherited from the canonical Iceberg implementation + in Java (returns `2147483647`) + min (int): The minimum allowed value for Integers, inherited from the canonical Iceberg implementation + in Java (returns `-2147483648`) + """ + + root: Literal["int"] = Field(default="int") + + max: ClassVar[int] = 2147483647 + min: ClassVar[int] = -2147483648 + + +class LongType(PrimitiveType): + """A Long data type in Iceberg can be represented using an instance of this class. + + Longs in Iceberg are 64-bit signed integers. 
+ + Example: + >>> column_foo = LongType() + >>> isinstance(column_foo, LongType) + True + >>> column_foo + LongType() + >>> str(column_foo) + 'long' + + Attributes: + max (int): The maximum allowed value for Longs, inherited from the canonical Iceberg implementation + in Java. (returns `9223372036854775807`) + min (int): The minimum allowed value for Longs, inherited from the canonical Iceberg implementation + in Java (returns `-9223372036854775808`) + """ + + root: Literal["long"] = Field(default="long") + + max: ClassVar[int] = 9223372036854775807 + min: ClassVar[int] = -9223372036854775808 + + +class FloatType(PrimitiveType): + """A Float data type in Iceberg can be represented using an instance of this class. + + Floats in Iceberg are 32-bit IEEE 754 floating points and can be promoted to Doubles. + + Example: + >>> column_foo = FloatType() + >>> isinstance(column_foo, FloatType) + True + >>> column_foo + FloatType() + + Attributes: + max (float): The maximum allowed value for Floats, inherited from the canonical Iceberg implementation + in Java. (returns `3.4028235e38`) + min (float): The minimum allowed value for Floats, inherited from the canonical Iceberg implementation + in Java (returns `-3.4028235e38`) + """ + + max: ClassVar[float] = 3.4028235e38 + min: ClassVar[float] = -3.4028235e38 + + root: Literal["float"] = Field(default="float") + + +class DoubleType(PrimitiveType): + """A Double data type in Iceberg can be represented using an instance of this class. + + Doubles in Iceberg are 64-bit IEEE 754 floating points. + + Example: + >>> column_foo = DoubleType() + >>> isinstance(column_foo, DoubleType) + True + >>> column_foo + DoubleType() + """ + + root: Literal["double"] = Field(default="double") + + +class DateType(PrimitiveType): + """A Date data type in Iceberg can be represented using an instance of this class. + + Dates in Iceberg are calendar dates without a timezone or time. + + Example: + >>> column_foo = DateType() + >>> isinstance(column_foo, DateType) + True + >>> column_foo + DateType() + """ + + root: Literal["date"] = Field(default="date") + + +class TimeType(PrimitiveType): + """A Time data type in Iceberg can be represented using an instance of this class. + + Times in Iceberg have microsecond precision and are a time of day without a date or timezone. + + Example: + >>> column_foo = TimeType() + >>> isinstance(column_foo, TimeType) + True + >>> column_foo + TimeType() + """ + + root: Literal["time"] = Field(default="time") + + +class TimestampType(PrimitiveType): + """A Timestamp data type in Iceberg can be represented using an instance of this class. + + Timestamps in Iceberg have microsecond precision and include a date and a time of day without a timezone. + + Example: + >>> column_foo = TimestampType() + >>> isinstance(column_foo, TimestampType) + True + >>> column_foo + TimestampType() + """ + + root: Literal["timestamp"] = Field(default="timestamp") + + +class TimestamptzType(PrimitiveType): + """A Timestamptz data type in Iceberg can be represented using an instance of this class. + + Timestamptzs in Iceberg are stored as UTC and include a date and a time of day with a timezone. + + Example: + >>> column_foo = TimestamptzType() + >>> isinstance(column_foo, TimestamptzType) + True + >>> column_foo + TimestamptzType() + """ + + root: Literal["timestamptz"] = Field(default="timestamptz") + + +class StringType(PrimitiveType): + """A String data type in Iceberg can be represented using an instance of this class. 
+ + Strings in Iceberg are arbitrary-length character sequences and are encoded with UTF-8. + + Example: + >>> column_foo = StringType() + >>> isinstance(column_foo, StringType) + True + >>> column_foo + StringType() + """ + + root: Literal["string"] = Field(default="string") + + +class UUIDType(PrimitiveType): + """A UUID data type in Iceberg can be represented using an instance of this class. + + UUIDs in Iceberg are universally unique identifiers. + + Example: + >>> column_foo = UUIDType() + >>> isinstance(column_foo, UUIDType) + True + >>> column_foo + UUIDType() + """ + + root: Literal["uuid"] = Field(default="uuid") + + +class BinaryType(PrimitiveType): + """A Binary data type in Iceberg can be represented using an instance of this class. + + Binaries in Iceberg are arbitrary-length byte arrays. + + Example: + >>> column_foo = BinaryType() + >>> isinstance(column_foo, BinaryType) + True + >>> column_foo + BinaryType() + """ + + root: Literal["binary"] = Field(default="binary") diff --git a/pyiceberg/utils/__init__.py b/pyiceberg/utils/__init__.py new file mode 100644 index 0000000000..13a83393a9 --- /dev/null +++ b/pyiceberg/utils/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/pyiceberg/utils/bin_packing.py b/pyiceberg/utils/bin_packing.py new file mode 100644 index 0000000000..ddebde13e2 --- /dev/null +++ b/pyiceberg/utils/bin_packing.py @@ -0,0 +1,106 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
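+# A rough usage sketch (comments only, not executed; weights here equal the
+# items themselves):
+#
+#     >>> list(PackingIterator([1, 2, 3, 4, 5], target_weight=5, lookback=1, weight_func=lambda x: x))
+#     [[1, 2], [3], [4], [5]]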
+from __future__ import annotations + +from typing import ( + Callable, + Generic, + Iterable, + List, + Optional, + TypeVar, +) + +T = TypeVar("T") + + +class Bin(Generic[T]): + def __init__(self, target_weight: int) -> None: + self.bin_weight = 0 + self.target_weight = target_weight + self.items: List[T] = [] + + def weight(self) -> int: + return self.bin_weight + + def can_add(self, weight: int) -> bool: + return self.bin_weight + weight <= self.target_weight + + def add(self, item: T, weight: int) -> None: + self.bin_weight += weight + self.items.append(item) + + +class PackingIterator(Generic[T]): + bins: List[Bin[T]] + + def __init__( + self, + items: Iterable[T], + target_weight: int, + lookback: int, + weight_func: Callable[[T], int], + largest_bin_first: bool = False, + ) -> None: + self.items = iter(items) + self.target_weight = target_weight + self.lookback = lookback + self.weight_func = weight_func + self.largest_bin_first = largest_bin_first + self.bins = [] + + def __iter__(self) -> PackingIterator[T]: + """Return an iterator for the PackingIterator class.""" + return self + + def __next__(self) -> List[T]: + """Return the next item when iterating over the PackingIterator class.""" + while True: + try: + item = next(self.items) + weight = self.weight_func(item) + bin_ = self.find_bin(weight) + if bin_ is not None: + bin_.add(item, weight) + else: + bin_ = Bin(self.target_weight) + bin_.add(item, weight) + self.bins.append(bin_) + + if len(self.bins) > self.lookback: + return self.remove_bin().items + except StopIteration: + break + + if len(self.bins) == 0: + raise StopIteration() + + return self.remove_bin().items + + def find_bin(self, weight: int) -> Optional[Bin[T]]: + for bin_ in self.bins: + if bin_.can_add(weight): + return bin_ + return None + + def remove_bin(self) -> Bin[T]: + if self.largest_bin_first: + bin_ = max(self.bins, key=lambda b: b.weight()) + self.bins.remove(bin_) + return bin_ + else: + return self.bins.pop(0) diff --git a/pyiceberg/utils/concurrent.py b/pyiceberg/utils/concurrent.py new file mode 100644 index 0000000000..75d6ff8365 --- /dev/null +++ b/pyiceberg/utils/concurrent.py @@ -0,0 +1,48 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
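+# A minimal usage sketch of the ExecutorFactory defined below (illustrative only):
+# every caller receives the same shared ThreadPoolExecutor, sized by the optional
+# "max-workers" configuration key.
+#
+#     >>> executor = ExecutorFactory.get_or_create()
+#     >>> executor is ExecutorFactory.get_or_create()
+#     True
+#     >>> executor.submit(sum, [1, 2, 3]).result()
+#     6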
+"""Concurrency concepts that support efficient multi-threading.""" +from concurrent.futures import Executor, ThreadPoolExecutor +from typing import Optional + +from pyiceberg.utils.config import Config + + +class ExecutorFactory: + _instance: Optional[Executor] = None + + @staticmethod + def get_or_create() -> Executor: + """Return the same executor in each call.""" + if ExecutorFactory._instance is None: + max_workers = ExecutorFactory.max_workers() + ExecutorFactory._instance = ThreadPoolExecutor(max_workers=max_workers) + + return ExecutorFactory._instance + + @staticmethod + def max_workers() -> Optional[int]: + """Return the max number of workers configured.""" + config = Config() + val = config.config.get("max-workers") + + if val is None: + return None + + try: + return int(val) # type: ignore + except ValueError as err: + raise ValueError(f"Max workers should be an integer or left unset. Current value: {val}") from err diff --git a/pyiceberg/utils/config.py b/pyiceberg/utils/config.py new file mode 100644 index 0000000000..bd15828cba --- /dev/null +++ b/pyiceberg/utils/config.py @@ -0,0 +1,156 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
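+# A short sketch of how the configuration below is resolved (hypothetical values):
+# in environment variables, a double underscore separates nesting levels and a
+# single underscore becomes a dash, so PYICEBERG_CATALOG__DEFAULT__S3_REGION=us-east-1
+# amends the config with {"catalog": {"default": {"s3-region": "us-east-1"}}}.
+# File-based and environment-based settings are combined with merge_config:
+#
+#     >>> merge_config({"a": {"b": 1}}, {"a": {"c": 2}})
+#     {'a': {'b': 1, 'c': 2}}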
+import logging
+import os
+from typing import List, Optional
+
+import strictyaml
+
+from pyiceberg.typedef import FrozenDict, RecursiveDict
+
+PYICEBERG = "pyiceberg_"
+DEFAULT = "default"
+CATALOG = "catalog"
+DEFAULT_CATALOG = f"{DEFAULT}-{CATALOG}"
+PYICEBERG_HOME = "PYICEBERG_HOME"
+PYICEBERG_YML = ".pyiceberg.yaml"
+
+logger = logging.getLogger(__name__)
+
+
+def merge_config(lhs: RecursiveDict, rhs: RecursiveDict) -> RecursiveDict:
+    """Merge the right-hand side into the left-hand side."""
+    new_config = lhs.copy()
+    for rhs_key, rhs_value in rhs.items():
+        if rhs_key in new_config:
+            lhs_value = new_config[rhs_key]
+            if isinstance(lhs_value, dict) and isinstance(rhs_value, dict):
+                # If they are both dicts, then we have to go deeper
+                new_config[rhs_key] = merge_config(lhs_value, rhs_value)
+            else:
+                # Take the non-null value, with precedence on rhs
+                new_config[rhs_key] = lhs_value or rhs_value
+        else:
+            # New key
+            new_config[rhs_key] = rhs_value
+
+    return new_config
+
+
+def _lowercase_dictionary_keys(input_dict: RecursiveDict) -> RecursiveDict:
+    """Lowercase all the keys of a dictionary recursively, to make the lookup case-insensitive."""
+    return {k.lower(): _lowercase_dictionary_keys(v) if isinstance(v, dict) else v for k, v in input_dict.items()}
+
+
+class Config:
+    config: RecursiveDict
+
+    def __init__(self) -> None:
+        config = self._from_configuration_files() or {}
+        config = merge_config(config, self._from_environment_variables(config))
+        self.config = FrozenDict(**config)
+
+    @staticmethod
+    def _from_configuration_files() -> Optional[RecursiveDict]:
+        """Load the first configuration file that it finds.
+
+        Will first look in the PYICEBERG_HOME env variable,
+        and then in the home directory.
+        """
+
+        def _load_yaml(directory: Optional[str]) -> Optional[RecursiveDict]:
+            if directory:
+                path = os.path.join(directory, PYICEBERG_YML)
+                if os.path.isfile(path):
+                    with open(path, encoding="utf-8") as f:
+                        yml_str = f.read()
+                    file_config = strictyaml.load(yml_str).data
+                    file_config_lowercase = _lowercase_dictionary_keys(file_config)
+                    return file_config_lowercase
+            return None
+
+        # Give priority to the PYICEBERG_HOME directory
+        if pyiceberg_home_config := _load_yaml(os.environ.get(PYICEBERG_HOME)):
+            return pyiceberg_home_config
+        # Look into the home directory
+        if home_directory_config := _load_yaml(os.path.expanduser("~")):
+            return home_directory_config
+        # Didn't find a config
+        return None
+
+    @staticmethod
+    def _from_environment_variables(config: RecursiveDict) -> RecursiveDict:
+        """Read the environment variables, to check if there are any prefixed with PYICEBERG_.
+
+        Args:
+            config: Existing configuration that's being amended with configuration from environment variables.
+
+        Returns:
+            Amended configuration.
+        """
+
+        def set_property(_config: RecursiveDict, path: List[str], config_value: str) -> None:
+            while len(path) > 0:
+                element = path.pop(0)
+                if len(path) == 0:
+                    # We're at the end
+                    _config[element] = config_value
+                else:
+                    # We have to go deeper
+                    if element not in _config:
+                        _config[element] = {}
+                    if isinstance(_config[element], dict):
+                        _config = _config[element]  # type: ignore
+                    else:
+                        raise ValueError(
+                            f"Incompatible configurations, merging dict with a value: {'.'.join(path)}, value: {config_value}"
+                        )
+
+        for env_var, config_value in os.environ.items():
+            # Make it lowercase to make it case-insensitive
+            env_var_lower = env_var.lower()
+            if env_var_lower.startswith(PYICEBERG.lower()):
+                key = env_var_lower[len(PYICEBERG) :]
+                parts = key.split("__")
+                parts_normalized = [part.replace("_", "-") for part in parts]
+                set_property(config, parts_normalized, config_value)
+
+        return config
+
+    def get_default_catalog_name(self) -> str:
+        """Return the default catalog name.
+
+        Returns: The name of the default catalog as configured under `default-catalog`.
+            Returns `default` when the key cannot be found in the config file.
+        """
+        if default_catalog_name := self.config.get(DEFAULT_CATALOG):
+            if not isinstance(default_catalog_name, str):
+                raise ValueError(f"Default catalog name should be a str: {default_catalog_name}")
+            return default_catalog_name
+        return DEFAULT
+
+    def get_catalog_config(self, catalog_name: str) -> Optional[RecursiveDict]:
+        if CATALOG in self.config:
+            catalog_name_lower = catalog_name.lower()
+            catalogs = self.config[CATALOG]
+            if not isinstance(catalogs, dict):
+                raise ValueError(f"Catalog configuration needs to be an object: {catalog_name}")
+            if catalog_name_lower in catalogs:
+                catalog_conf = catalogs[catalog_name_lower]
+                assert isinstance(catalog_conf, dict), f"Configuration path catalogs.{catalog_name_lower} needs to be an object"
+                return catalog_conf
+        return None
diff --git a/pyiceberg/utils/datetime.py b/pyiceberg/utils/datetime.py
new file mode 100644
index 0000000000..1ab56d6ea3
--- /dev/null
+++ b/pyiceberg/utils/datetime.py
@@ -0,0 +1,185 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
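+# A few worked conversions using the helpers below (values chosen for illustration):
+#
+#     >>> date_str_to_days("1970-02-01")
+#     31
+#     >>> days_to_date(31)
+#     datetime.date(1970, 2, 1)
+#     >>> micros_to_hours(7_200_000_000)  # two hours after epoch
+#     2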
+"""Helper methods for working with date/time representations."""
+from __future__ import annotations
+
+import re
+from datetime import (
+    date,
+    datetime,
+    time,
+    timedelta,
+)
+
+EPOCH_DATE = date.fromisoformat("1970-01-01")
+EPOCH_TIMESTAMP = datetime.fromisoformat("1970-01-01T00:00:00.000000")
+ISO_TIMESTAMP = re.compile(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,6})?")
+EPOCH_TIMESTAMPTZ = datetime.fromisoformat("1970-01-01T00:00:00.000000+00:00")
+ISO_TIMESTAMPTZ = re.compile(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,6})?[-+]\d{2}:\d{2}")
+
+
+def micros_to_days(timestamp: int) -> int:
+    """Convert a timestamp in microseconds to a number of days from 1970-01-01."""
+    return timedelta(microseconds=timestamp).days
+
+
+def micros_to_time(micros: int) -> time:
+    """Convert a timestamp in microseconds to a time."""
+    micros, microseconds = divmod(micros, 1000000)
+    micros, seconds = divmod(micros, 60)
+    micros, minutes = divmod(micros, 60)
+    hours = micros
+    return time(hour=hours, minute=minutes, second=seconds, microsecond=microseconds)
+
+
+def date_str_to_days(date_str: str) -> int:
+    """Convert an ISO-8601 formatted date to days from 1970-01-01."""
+    return (date.fromisoformat(date_str) - EPOCH_DATE).days
+
+
+def date_to_days(date_val: date) -> int:
+    """Convert a Python date object to days from 1970-01-01."""
+    return (date_val - EPOCH_DATE).days
+
+
+def days_to_date(days: int) -> date:
+    """Create a date from the number of days from 1970-01-01."""
+    return EPOCH_DATE + timedelta(days)
+
+
+def time_str_to_micros(time_str: str) -> int:
+    """Convert an ISO-8601 formatted time to microseconds from midnight."""
+    return time_to_micros(time.fromisoformat(time_str))
+
+
+def time_to_micros(t: time) -> int:
+    """Convert a datetime.time object to microseconds from midnight."""
+    return (((t.hour * 60 + t.minute) * 60) + t.second) * 1_000_000 + t.microsecond
+
+
+def datetime_to_micros(dt: datetime) -> int:
+    """Convert a datetime to microseconds from 1970-01-01T00:00:00.000000."""
+    if dt.tzinfo:
+        delta = dt - EPOCH_TIMESTAMPTZ
+    else:
+        delta = dt - EPOCH_TIMESTAMP
+    return (delta.days * 86400 + delta.seconds) * 1_000_000 + delta.microseconds
+
+
+def timestamp_to_micros(timestamp_str: str) -> int:
+    """Convert an ISO-8601 formatted timestamp without zone to microseconds from 1970-01-01T00:00:00.000000."""
+    if ISO_TIMESTAMP.fullmatch(timestamp_str):
+        return datetime_to_micros(datetime.fromisoformat(timestamp_str))
+    if ISO_TIMESTAMPTZ.fullmatch(timestamp_str):
+        # When we can match a timestamp with a zone, we can give a more specific error
+        raise ValueError(f"Zone offset provided, but not expected: {timestamp_str}")
+    raise ValueError(f"Invalid timestamp without zone: {timestamp_str} (must be ISO-8601)")
+
+
+def datetime_to_millis(dt: datetime) -> int:
+    """Convert a datetime to milliseconds from 1970-01-01T00:00:00.000000."""
+    if dt.tzinfo:
+        delta = dt - EPOCH_TIMESTAMPTZ
+    else:
+        delta = dt - EPOCH_TIMESTAMP
+    return (delta.days * 86400 + delta.seconds) * 1_000 + delta.microseconds // 1_000
+
+
+def millis_to_datetime(millis: int) -> datetime:
+    """Convert milliseconds from epoch to a timestamp."""
+    dt = timedelta(milliseconds=millis)
+    return EPOCH_TIMESTAMP + dt
+
+
+def timestamptz_to_micros(timestamptz_str: str) -> int:
+    """Convert an ISO-8601 formatted timestamp with zone to microseconds from 1970-01-01T00:00:00.000000+00:00."""
+    if ISO_TIMESTAMPTZ.fullmatch(timestamptz_str):
+        return datetime_to_micros(datetime.fromisoformat(timestamptz_str))
+    if ISO_TIMESTAMP.fullmatch(timestamptz_str):
+        # When we can match a timestamp without a zone, we can give a more specific error
+        raise ValueError(f"Missing zone offset: {timestamptz_str} (must be ISO-8601)")
+    raise ValueError(f"Invalid timestamp with zone: {timestamptz_str} (must be ISO-8601)")
+
+
+def micros_to_timestamp(micros: int) -> datetime:
+    """Convert microseconds from epoch to a timestamp."""
+    dt = timedelta(microseconds=micros)
+    return EPOCH_TIMESTAMP + dt
+
+
+def micros_to_timestamptz(micros: int) -> datetime:
+    """Convert microseconds from epoch to a UTC timestamp."""
+    dt = timedelta(microseconds=micros)
+    return EPOCH_TIMESTAMPTZ + dt
+
+
+def to_human_year(year_ordinal: int) -> str:
+    """Convert a year ordinal to a human-readable string."""
+    return f"{EPOCH_TIMESTAMP.year + year_ordinal:0=4d}"
+
+
+def to_human_month(month_ordinal: int) -> str:
+    """Convert a month ordinal to a human-readable string."""
+    return f"{EPOCH_TIMESTAMP.year + month_ordinal // 12:0=4d}-{1 + month_ordinal % 12:0=2d}"
+
+
+def to_human_day(day_ordinal: int) -> str:
+    """Convert a day ordinal to a human-readable string."""
+    return (EPOCH_DATE + timedelta(days=day_ordinal)).isoformat()
+
+
+def to_human_hour(hour_ordinal: int) -> str:
+    """Convert an hour ordinal to a human-readable string."""
+    return (EPOCH_TIMESTAMP + timedelta(hours=hour_ordinal)).isoformat("-", "hours")
+
+
+def to_human_time(micros_from_midnight: int) -> str:
+    """Convert a TimeType value to human string."""
+    return micros_to_time(micros_from_midnight).isoformat()
+
+
+def to_human_timestamptz(timestamp_micros: int) -> str:
+    """Convert a TimestamptzType value to human string."""
+    return (EPOCH_TIMESTAMPTZ + timedelta(microseconds=timestamp_micros)).isoformat()
+
+
+def to_human_timestamp(timestamp_micros: int) -> str:
+    """Convert a TimestampType value to human string."""
+    return (EPOCH_TIMESTAMP + timedelta(microseconds=timestamp_micros)).isoformat()
+
+
+def micros_to_hours(micros: int) -> int:
+    """Convert a timestamp in microseconds to hours from 1970-01-01T00:00."""
+    return micros // 3_600_000_000
+
+
+def days_to_months(days: int) -> int:
+    d = days_to_date(days)
+    return (d.year - EPOCH_DATE.year) * 12 + (d.month - EPOCH_DATE.month)
+
+
+def micros_to_months(micros: int) -> int:
+    dt = micros_to_timestamp(micros)
+    return (dt.year - EPOCH_TIMESTAMP.year) * 12 + (dt.month - EPOCH_TIMESTAMP.month)
+
+
+def days_to_years(days: int) -> int:
+    return days_to_date(days).year - EPOCH_DATE.year
+
+
+def micros_to_years(micros: int) -> int:
+    return micros_to_timestamp(micros).year - EPOCH_TIMESTAMP.year
diff --git a/pyiceberg/utils/decimal.py b/pyiceberg/utils/decimal.py
new file mode 100644
index 0000000000..503545b69b
--- /dev/null
+++ b/pyiceberg/utils/decimal.py
@@ -0,0 +1,127 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""Helper methods for working with Python Decimals."""
+import math
+from decimal import Decimal
+from typing import Optional, Union
+
+
+def decimal_to_unscaled(value: Decimal) -> int:
+    """Get an unscaled value given a Decimal value.
+
+    Args:
+        value (Decimal): A Decimal instance.
+
+    Returns:
+        int: The unscaled value.
+    """
+    sign, digits, _ = value.as_tuple()
+    return int(Decimal((sign, digits, 0)).to_integral_value())
+
+
+def unscaled_to_decimal(unscaled: int, scale: int) -> Decimal:
+    """Get a scaled Decimal value given an unscaled value and a scale.
+
+    Args:
+        unscaled (int): An unscaled value.
+        scale (int): A scale to set for the returned Decimal instance.
+
+    Returns:
+        Decimal: A scaled Decimal instance.
+    """
+    sign, digits, _ = Decimal(unscaled).as_tuple()
+    return Decimal((sign, digits, -scale))
+
+
+def bytes_required(value: Union[int, Decimal]) -> int:
+    """Return the minimum number of bytes needed to serialize a decimal or unscaled value.
+
+    Args:
+        value (int | Decimal): a Decimal value or unscaled int value.
+
+    Returns:
+        int: the minimum number of bytes needed to serialize the value.
+    """
+    if isinstance(value, int):
+        return (value.bit_length() + 7) // 8
+    elif isinstance(value, Decimal):
+        return (decimal_to_unscaled(value).bit_length() + 7) // 8
+
+    raise ValueError(f"Unsupported value: {value}")
+
+
+def decimal_to_bytes(value: Decimal, byte_length: Optional[int] = None) -> bytes:
+    """Return a byte representation of a decimal.
+
+    Args:
+        value (Decimal): a decimal value.
+        byte_length (int): The number of bytes.
+    Returns:
+        bytes: the unscaled value of the Decimal as bytes.
+    """
+    unscaled_value = decimal_to_unscaled(value)
+    if byte_length is None:
+        byte_length = bytes_required(unscaled_value)
+    return unscaled_value.to_bytes(byte_length, byteorder="big", signed=True)
+
+
+def bytes_to_decimal(value: bytes, scale: int) -> Decimal:
+    """Return a decimal from the bytes.
+
+    Args:
+        value (bytes): the bytes to be converted into a decimal.
+        scale (int): the scale of the decimal.
+
+    Returns:
+        Decimal: the scaled decimal.
+    """
+    unscaled_datum = int.from_bytes(value, byteorder="big", signed=True)
+    return unscaled_to_decimal(unscaled_datum, scale)
+
+
+def truncate_decimal(value: Decimal, width: int) -> Decimal:
+    """Get a truncated Decimal value given a decimal value and a width.
+
+    Args:
+        value (Decimal): a decimal value.
+        width (int): A width for the returned Decimal instance.
+    Returns:
+        Decimal: A truncated Decimal instance.
+    """
+    unscaled_value = decimal_to_unscaled(value)
+    applied_value = unscaled_value - (((unscaled_value % width) + width) % width)
+    return unscaled_to_decimal(applied_value, abs(int(value.as_tuple().exponent)))
+
+
+MAX_PRECISION = tuple(math.floor(math.log10(math.fabs(math.pow(2, 8 * pos - 1) - 1))) for pos in range(24))
+REQUIRED_LENGTH = tuple(next(pos for pos in range(24) if p <= MAX_PRECISION[pos]) for p in range(40))
+
+
+def decimal_required_bytes(precision: int) -> int:
+    """Compute the number of bytes required to store a given precision.
+
+    Args:
+        precision: The number of digits to store.
+
+    Returns:
+        The number of bytes required to store a decimal with a certain precision.
+ """ + if precision <= 0 or precision >= 40: + raise ValueError(f"Unsupported precision, outside of (0, 40]: {precision}") + + return REQUIRED_LENGTH[precision] diff --git a/pyiceberg/utils/deprecated.py b/pyiceberg/utils/deprecated.py new file mode 100644 index 0000000000..0de8cbfad8 --- /dev/null +++ b/pyiceberg/utils/deprecated.py @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +import functools +import warnings +from typing import Any, Callable, Optional + + +def deprecated(deprecated_in: str, removed_in: str, help_message: Optional[str] = None) -> Callable: # type: ignore + """Mark functions as deprecated. + + Adding this will result in a warning being emitted when the function is used. + """ + if help_message is not None: + help_message = f" {help_message}." + + def decorator(func: Callable): # type: ignore + @functools.wraps(func) + def new_func(*args: Any, **kwargs: Any) -> Any: + warnings.simplefilter("always", DeprecationWarning) # turn off filter + + warnings.warn( + f"Call to {func.__name__}, deprecated in {deprecated_in}, will be removed in {removed_in}.{help_message}", + category=DeprecationWarning, + stacklevel=2, + ) + warnings.simplefilter("default", DeprecationWarning) # reset filter + return func(*args, **kwargs) + + return new_func + + return decorator diff --git a/pyiceberg/utils/lazydict.py b/pyiceberg/utils/lazydict.py new file mode 100644 index 0000000000..dfe251c0a7 --- /dev/null +++ b/pyiceberg/utils/lazydict.py @@ -0,0 +1,68 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import ( + Dict, + Iterator, + Mapping, + Optional, + Sequence, + TypeVar, + Union, + cast, +) + +K = TypeVar("K") +V = TypeVar("V") + + +class LazyDict(Mapping[K, V]): + """Lazily build a dictionary from an array of items.""" + + __slots__ = ("_contents", "_dict") + + # Since Python's type system is not powerful enough to express the type of the + # contents of the dictionary, we use specify the type as a sequence of either K or V + # values. 
+ # + # Rather than spending the runtime cost of checking the type of each item, we presume + # that the developer has correctly used the class and that the contents are valid. + def __init__(self, contents: Sequence[Sequence[Union[K, V]]]): + self._contents = contents + self._dict: Optional[Dict[K, V]] = None + + def _build_dict(self) -> Dict[K, V]: + self._dict = {} + for item in self._contents: + self._dict.update(dict(zip(cast(Sequence[K], item[::2]), cast(Sequence[V], item[1::2])))) + + return self._dict + + def __getitem__(self, key: K, /) -> V: + """Return the value for the given key.""" + source = self._dict or self._build_dict() + return source[key] + + def __iter__(self) -> Iterator[K]: + """Return an iterator over the keys of the dictionary.""" + source = self._dict or self._build_dict() + return iter(source) + + def __len__(self) -> int: + """Return the number of items in the dictionary.""" + source = self._dict or self._build_dict() + return len(source) diff --git a/pyiceberg/utils/parsing.py b/pyiceberg/utils/parsing.py new file mode 100644 index 0000000000..200904fd97 --- /dev/null +++ b/pyiceberg/utils/parsing.py @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +import re +from re import Pattern + +from pyiceberg.exceptions import ValidationError + + +class ParseNumberFromBrackets: + """Extracts the size from a string in the form of prefix[22].""" + + regex: Pattern # type: ignore + prefix: str + + def __init__(self, prefix: str): + self.prefix = prefix + self.regex = re.compile(rf"{prefix}\[(\d+)\]") + + def match(self, str_repr: str) -> int: + matches = self.regex.search(str_repr) + if matches: + return int(matches.group(1)) + raise ValidationError(f"Could not match {str_repr}, expected format {self.prefix}[22]") diff --git a/pyiceberg/utils/schema_conversion.py b/pyiceberg/utils/schema_conversion.py new file mode 100644 index 0000000000..74d0ae9ee7 --- /dev/null +++ b/pyiceberg/utils/schema_conversion.py @@ -0,0 +1,609 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
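+# A small sketch of the Iceberg-to-Avro direction implemented below (using the
+# imports above; the field name is illustrative): a required field maps to a
+# plain Avro type, while an optional field becomes a ["null", ...] union with a
+# null default.
+#
+#     >>> schema = Schema(NestedField(field_id=1, name="name", field_type=StringType(), required=False))
+#     >>> AvroSchemaConversion().iceberg_to_avro(schema, schema_name="t")
+#     {'type': 'record', 'fields': [{'name': 'name', 'field-id': 1, 'type': ['null', 'string'], 'default': None}], 'name': 't'}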
+"""Utility class for converting between Avro and Iceberg schemas."""
+import logging
+from typing import (
+    Any,
+    Dict,
+    List,
+    Optional,
+    Tuple,
+    Union,
+)
+
+from pyiceberg.schema import Schema, SchemaVisitorPerPrimitiveType, visit
+from pyiceberg.types import (
+    BinaryType,
+    BooleanType,
+    DateType,
+    DecimalType,
+    DoubleType,
+    FixedType,
+    FloatType,
+    IcebergType,
+    IntegerType,
+    ListType,
+    LongType,
+    MapType,
+    NestedField,
+    PrimitiveType,
+    StringType,
+    StructType,
+    TimestampType,
+    TimestamptzType,
+    TimeType,
+    UUIDType,
+)
+
+logger = logging.getLogger(__name__)
+
+PRIMITIVE_FIELD_TYPE_MAPPING: Dict[str, PrimitiveType] = {
+    "boolean": BooleanType(),
+    "bytes": BinaryType(),
+    "double": DoubleType(),
+    "float": FloatType(),
+    "int": IntegerType(),
+    "long": LongType(),
+    "string": StringType(),
+    "enum": StringType(),
+}
+
+LOGICAL_FIELD_TYPE_MAPPING: Dict[Tuple[str, str], PrimitiveType] = {
+    ("date", "int"): DateType(),
+    ("time-micros", "long"): TimeType(),
+    ("timestamp-micros", "long"): TimestampType(),
+    ("uuid", "fixed"): UUIDType(),
+}
+
+AvroType = Union[str, Any]
+
+
+class AvroSchemaConversion:
+    def avro_to_iceberg(self, avro_schema: Dict[str, Any]) -> Schema:
+        """Convert an Apache Avro schema into the Apache Iceberg equivalent.
+
+        This expects field IDs to be encoded in the Avro schema:
+
+            {
+                "type": "record",
+                "name": "manifest_file",
+                "fields": [
+                    {"name": "manifest_path", "type": "string", "doc": "Location URI with FS scheme", "field-id": 500},
+                    {"name": "manifest_length", "type": "long", "doc": "Total file size in bytes", "field-id": 501}
+                ]
+            }
+
+        Example:
+            This converts an Avro schema into an Iceberg schema:
+
+            >>> converted_schema = AvroSchemaConversion().avro_to_iceberg({
+            ...     "type": "record",
+            ...     "name": "manifest_file",
+            ...     "fields": [
+            ...         {"name": "manifest_path", "type": "string", "doc": "Location URI with FS scheme", "field-id": 500},
+            ...         {"name": "manifest_length", "type": "long", "doc": "Total file size in bytes", "field-id": 501}
+            ...     ]
+            ... })
+            >>> iceberg_schema = Schema(
+            ...     NestedField(
+            ...         field_id=500, name="manifest_path", field_type=StringType(), required=True, doc="Location URI with FS scheme"
+            ...     ),
+            ...     NestedField(
+            ...         field_id=501, name="manifest_length", field_type=LongType(), required=True, doc="Total file size in bytes"
+            ...     ),
+            ...     schema_id=1
+            ... )
+            >>> converted_schema == iceberg_schema
+            True
+
+        Args:
+            avro_schema (Dict[str, Any]): The JSON decoded Avro schema.
+
+        Returns:
+            Equivalent Iceberg schema.
+        """
+        return Schema(*[self._convert_field(field) for field in avro_schema["fields"]], schema_id=1)
+
+    def iceberg_to_avro(self, schema: Schema, schema_name: Optional[str] = None) -> AvroType:
+        """Convert an Iceberg schema into an Avro dictionary that can be serialized to JSON."""
+        return visit(schema, ConvertSchemaToAvro(schema_name))
+
+    def _resolve_union(
+        self, type_union: Union[Dict[str, str], List[Union[str, Dict[str, str]]], str]
+    ) -> Tuple[Union[str, Dict[str, Any]], bool]:
+        """
+        Convert a union into its type and resolve whether the field is required.
+
+        Examples:
+            >>> AvroSchemaConversion()._resolve_union('str')
+            ('str', True)
+            >>> AvroSchemaConversion()._resolve_union(['null', 'str'])
+            ('str', False)
+            >>> AvroSchemaConversion()._resolve_union([{'type': 'str'}])
+            ({'type': 'str'}, True)
+            >>> AvroSchemaConversion()._resolve_union(['null', {'type': 'str'}])
+            ({'type': 'str'}, False)
+
+        Args:
+            type_union: The field, can be a string 'str', list ['null', 'str'], or dict {"type": 'str'}.
+
+        Returns:
+            A tuple containing the type and if required.
+
+        Raises:
+            TypeError: When non-optional union types are encountered.
+        """
+        avro_types: Union[Dict[str, str], List[Union[Dict[str, str], str]]]
+        if isinstance(type_union, str):
+            # It is a primitive and required
+            return type_union, True
+        elif isinstance(type_union, dict):
+            # It is a complex type and required
+            return type_union, True
+        else:
+            avro_types = type_union
+
+        if len(avro_types) > 2:
+            raise TypeError(f"Non-optional types aren't part of the Iceberg specification: {avro_types}")
+
+        # For the Iceberg spec it is required to set the default value to null
+        # From https://iceberg.apache.org/spec/#avro
+        # Optional fields must always set the Avro field default value to null.
+        #
+        # This means that null has to come first:
+        # https://avro.apache.org/docs/current/spec.html
+        # type of the default value must match the first element of the union.
+        if "null" != avro_types[0]:
+            raise TypeError("Only null-unions are supported")
+
+        # Filter the null value and return the type
+        return list(filter(lambda t: t != "null", avro_types))[0], False
+
+    def _convert_schema(self, avro_type: Union[str, Dict[str, Any]]) -> IcebergType:
+        """
+        Resolve the Avro type.
+
+        Args:
+            avro_type: The Avro type, can be simple or complex.
+
+        Returns:
+            The equivalent IcebergType.
+
+        Raises:
+            TypeError: When an unknown type is encountered.
+        """
+        if isinstance(avro_type, str) and avro_type in PRIMITIVE_FIELD_TYPE_MAPPING:
+            return PRIMITIVE_FIELD_TYPE_MAPPING[avro_type]
+        elif isinstance(avro_type, dict):
+            if "logicalType" in avro_type:
+                return self._convert_logical_type(avro_type)
+            else:
+                # Resolve potential nested types
+                while "type" in avro_type and isinstance(avro_type["type"], dict):
+                    avro_type = avro_type["type"]
+                type_identifier = avro_type["type"]
+                if type_identifier == "record":
+                    return self._convert_record_type(avro_type)
+                elif type_identifier == "array":
+                    return self._convert_array_type(avro_type)
+                elif type_identifier == "map":
+                    return self._convert_map_type(avro_type)
+                elif type_identifier == "fixed":
+                    return self._convert_fixed_type(avro_type)
+                elif isinstance(type_identifier, str) and type_identifier in PRIMITIVE_FIELD_TYPE_MAPPING:
+                    return PRIMITIVE_FIELD_TYPE_MAPPING[type_identifier]
+                else:
+                    raise TypeError(f"Unknown type: {avro_type}")
+        else:
+            raise TypeError(f"Unknown type: {avro_type}")
+
+    def _convert_field(self, field: Dict[str, Any]) -> NestedField:
+        """Convert an Avro field into an Iceberg equivalent field.
+
+        Args:
+            field: The Avro field.
+
+        Returns:
+            The Iceberg equivalent field.
+        """
+        if "field-id" not in field:
+            raise ValueError(f"Cannot convert field, missing field-id: {field}")
+
+        plain_type, required = self._resolve_union(field["type"])
+
+        return NestedField(
+            field_id=field["field-id"],
+            name=field["name"],
+            field_type=self._convert_schema(plain_type),
+            required=required,
+            doc=field.get("doc"),
+        )
+
+    def _convert_record_type(self, record_type: Dict[str, Any]) -> StructType:
+        """
+        Convert the fields from a record into an Iceberg struct.
+
+        Examples:
+            >>> from pyiceberg.utils.schema_conversion import AvroSchemaConversion
+            >>> record_type = {
+            ...     "type": "record",
+            ...     "name": "r508",
+            ...     "fields": [{
+            ...         "name": "contains_null",
+            ...         "type": "boolean",
+            ...         "doc": "True if any file has a null partition value",
+            ...         "field-id": 509,
+            ...     }, {
+            ...         "name": "contains_nan",
+            ...         "type": ["null", "boolean"],
+            ...         "doc": "True if any file has a nan partition value",
+            ...         "default": None,
+            ...         "field-id": 518,
+            ...     }],
+            ... }
+            >>> actual = AvroSchemaConversion()._convert_record_type(record_type)
+            >>> expected = StructType(
+            ...     fields=(
+            ...         NestedField(
+            ...             field_id=509,
+            ...             name="contains_null",
+            ...             field_type=BooleanType(),
+            ...             required=True,
+            ...             doc="True if any file has a null partition value",
+            ...         ),
+            ...         NestedField(
+            ...             field_id=518,
+            ...             name="contains_nan",
+            ...             field_type=BooleanType(),
+            ...             required=False,
+            ...             doc="True if any file has a nan partition value",
+            ...         ),
+            ...     )
+            ... )
+            >>> expected == actual
+            True
+
+        Args:
+            record_type: The record type itself.
+
+        Returns: A StructType.
+        """
+        if record_type["type"] != "record":
+            raise ValueError(f"Expected record type, got: {record_type}")
+
+        return StructType(*[self._convert_field(field) for field in record_type["fields"]])
+
+    def _convert_array_type(self, array_type: Dict[str, Any]) -> ListType:
+        if "element-id" not in array_type:
+            raise ValueError(f"Cannot convert array-type, missing element-id: {array_type}")
+
+        plain_type, element_required = self._resolve_union(array_type["items"])
+
+        return ListType(
+            element_id=array_type["element-id"],
+            element_type=self._convert_schema(plain_type),
+            element_required=element_required,
+        )
+
+    def _convert_map_type(self, map_type: Dict[str, Any]) -> MapType:
+        """Convert an Avro map type into an Iceberg MapType.
+
+        Args:
+            map_type: The dict that describes the Avro map type.
+
+        Examples:
+            >>> from pyiceberg.utils.schema_conversion import AvroSchemaConversion
+            >>> avro_field = {
+            ...     "type": "map",
+            ...     "values": ["null", "long"],
+            ...     "key-id": 101,
+            ...     "value-id": 102,
+            ... }
+            >>> actual = AvroSchemaConversion()._convert_map_type(avro_field)
+            >>> expected = MapType(
+            ...     key_id=101,
+            ...     key_type=StringType(),
+            ...     value_id=102,
+            ...     value_type=LongType(),
+            ...     value_required=False
+            ... )
+            >>> actual == expected
+            True
+
+        Returns: A MapType.
+        """
+        value_type, value_required = self._resolve_union(map_type["values"])
+        return MapType(
+            key_id=map_type["key-id"],
+            # Avro only supports string keys
+            key_type=StringType(),
+            value_id=map_type["value-id"],
+            value_type=self._convert_schema(value_type),
+            value_required=value_required,
+        )
+
+    def _convert_logical_type(self, avro_logical_type: Dict[str, Any]) -> IcebergType:
+        """Convert a schema with a logical type annotation into an IcebergType.
+
+        For the decimal and map we need to fetch more keys from the dict, and for
+        the simple ones we can just look it up in the mapping.
+
+        Examples:
+            >>> from pyiceberg.utils.schema_conversion import AvroSchemaConversion
+            >>> avro_logical_type = {
+            ...     "type": "int",
+            ...     "logicalType": "date"
+            ... }
+            >>> actual = AvroSchemaConversion()._convert_logical_type(avro_logical_type)
+            >>> actual == DateType()
+            True
+
+        Args:
+            avro_logical_type: The logical type.
+
+        Returns:
+            The converted logical type.
+
+        Raises:
+            ValueError: When the logical type is unknown.
+        """
+        logical_type = avro_logical_type["logicalType"]
+        physical_type = avro_logical_type["type"]
+        if logical_type == "decimal":
+            return self._convert_logical_decimal_type(avro_logical_type)
+        elif logical_type == "map":
+            return self._convert_logical_map_type(avro_logical_type)
+        elif logical_type == "timestamp-micros":
+            if avro_logical_type.get("adjust-to-utc", False) is True:
+                return TimestamptzType()
+            else:
+                return TimestampType()
+        elif (logical_type, physical_type) in LOGICAL_FIELD_TYPE_MAPPING:
+            return LOGICAL_FIELD_TYPE_MAPPING[(logical_type, physical_type)]
+        else:
+            raise ValueError(f"Unknown logical/physical type combination: {avro_logical_type}")
+
+    def _convert_logical_decimal_type(self, avro_type: Dict[str, Any]) -> DecimalType:
+        """Convert an Avro type to an Iceberg DecimalType.
+
+        Args:
+            avro_type: The Avro type.
+
+        Examples:
+            >>> from pyiceberg.utils.schema_conversion import AvroSchemaConversion
+            >>> avro_decimal_type = {
+            ...     "type": "bytes",
+            ...     "logicalType": "decimal",
+            ...     "precision": 19,
+            ...     "scale": 25
+            ... }
+            >>> actual = AvroSchemaConversion()._convert_logical_decimal_type(avro_decimal_type)
+            >>> expected = DecimalType(
+            ...     precision=19,
+            ...     scale=25
+            ... )
+            >>> actual == expected
+            True
+
+        Returns:
+            An Iceberg DecimalType.
+        """
+        return DecimalType(precision=avro_type["precision"], scale=avro_type["scale"])
+
+    def _convert_logical_map_type(self, avro_type: Dict[str, Any]) -> MapType:
+        """Convert an Avro map type to an Iceberg MapType.
+
+        When the map key is not a string type, a logical map can be used to still encode it in Avro.
+
+        Args:
+            avro_type: The Avro Type.
+
+        Examples:
+            >>> from pyiceberg.utils.schema_conversion import AvroSchemaConversion
+            >>> avro_type = {
+            ...     "type": "array",
+            ...     "logicalType": "map",
+            ...     "items": {
+            ...         "type": "record",
+            ...         "name": "k101_v102",
+            ...         "fields": [
+            ...             {"name": "key", "type": "int", "field-id": 101},
+            ...             {"name": "value", "type": "string", "field-id": 102},
+            ...         ],
+            ...     },
+            ... }
+            >>> actual = AvroSchemaConversion()._convert_logical_map_type(avro_type)
+            >>> expected = MapType(
+            ...     key_id=101,
+            ...     key_type=IntegerType(),
+            ...     value_id=102,
+            ...     value_type=StringType(),
+            ...     value_required=True
+            ... )
+            >>> actual == expected
+            True
+
+        .. _Apache Iceberg specification:
+            https://iceberg.apache.org/spec/#appendix-a-format-specific-requirements
+
+        Returns:
+            The logical map.
+        """
+        fields = avro_type["items"]["fields"]
+        if len(fields) != 2:
+            raise ValueError(f'Invalid key-value pair schema: {avro_type["items"]}')
+        key = self._convert_field(list(filter(lambda f: f["name"] == "key", fields))[0])
+        value = self._convert_field(list(filter(lambda f: f["name"] == "value", fields))[0])
+        return MapType(
+            key_id=key.field_id,
+            key_type=key.field_type,
+            value_id=value.field_id,
+            value_type=value.field_type,
+            value_required=value.required,
+        )
+
+    def _convert_fixed_type(self, avro_type: Dict[str, Any]) -> FixedType:
+        """
+        Convert an Avro fixed type to the equivalent Iceberg fixed type.
+
+        - https://avro.apache.org/docs/current/spec.html#Fixed
+
+        Args:
+            avro_type: The Avro type.
+
+        Examples:
+            >>> from pyiceberg.utils.schema_conversion import AvroSchemaConversion
+            >>> avro_fixed_type = {
+            ...     "name": "md5",
+            ...     "type": "fixed",
+            ...     "size": 16
+            ... }
+            >>> FixedType(length=16) == AvroSchemaConversion()._convert_fixed_type(avro_fixed_type)
+            True
+
+        Returns:
+            An Iceberg equivalent fixed type.
+        """
+        return FixedType(length=avro_type["size"])
+
+
+class ConvertSchemaToAvro(SchemaVisitorPerPrimitiveType[AvroType]):
+    """Convert an Iceberg schema to an Avro schema."""
+
+    schema_name: Optional[str]
+    last_list_field_id: int
+    last_map_key_field_id: int
+    last_map_value_field_id: int
+
+    def __init__(self, schema_name: Optional[str]) -> None:
+        """Convert an Iceberg schema to an Avro schema.
+
+        Args:
+            schema_name: The name of the root record.
+        """
+        self.schema_name = schema_name
+
+    def schema(self, schema: Schema, struct_result: AvroType) -> AvroType:
+        if isinstance(struct_result, dict) and self.schema_name is not None:
+            struct_result["name"] = self.schema_name
+        return struct_result
+
+    def before_list_element(self, element: NestedField) -> None:
+        self.last_list_field_id = element.field_id
+
+    def before_map_key(self, key: NestedField) -> None:
+        self.last_map_key_field_id = key.field_id
+
+    def before_map_value(self, value: NestedField) -> None:
+        self.last_map_value_field_id = value.field_id
+
+    def struct(self, struct: StructType, field_results: List[AvroType]) -> AvroType:
+        return {"type": "record", "fields": field_results}
+
+    def field(self, field: NestedField, field_result: AvroType) -> AvroType:
+        # Sets the schema name
+        if isinstance(field_result, dict) and field_result.get("type") == "record":
+            field_result["name"] = f"r{field.field_id}"
+
+        result = {
+            "name": field.name,
+            "field-id": field.field_id,
+            "type": field_result if field.required else ["null", field_result],
+        }
+
+        if field.optional:
+            result["default"] = None
+
+        if field.doc is not None:
+            result["doc"] = field.doc
+
+        return result
+
+    def list(self, list_type: ListType, element_result: AvroType) -> AvroType:
+        # Sets the schema name in case of a record
+        if isinstance(element_result, dict) and element_result.get("type") == "record":
+            element_result["name"] = f"r{self.last_list_field_id}"
+        return {"type": "array", "element-id": self.last_list_field_id, "items": element_result}
+
+    def map(self, map_type: MapType, key_result: AvroType, value_result: AvroType) -> AvroType:
+        if isinstance(map_type.key_type, StringType):
+            # Avro maps only support string keys
+            return {
+                "type": "map",
+                "values": value_result,
+                "key-id": self.last_map_key_field_id,
+                "value-id": self.last_map_value_field_id,
+            }
+        else:
+            # Creates a logical map as an array of key/value records,
+            # which is binary compatible with a regular Avro map
+            return {
+                "type": "array",
+                "items": {
+                    "type": "record",
+                    "name": f"k{self.last_map_key_field_id}_v{self.last_map_value_field_id}",
+                    "fields": [
+                        {"name": "key", "type": key_result, "field-id": self.last_map_key_field_id},
+                        {"name": "value", "type": value_result, "field-id": self.last_map_value_field_id},
+                    ],
+                },
+                "logicalType": "map",
+            }
+
+    def visit_fixed(self, fixed_type: FixedType) -> AvroType:
+        return {"type": "fixed", "size": len(fixed_type)}
+
+    def visit_decimal(self, decimal_type: DecimalType) -> AvroType:
+        return {"type": "bytes", "logicalType": "decimal", "precision": decimal_type.precision, "scale": decimal_type.scale}
+
+    def visit_boolean(self, boolean_type: BooleanType) -> AvroType:
+        return "boolean"
+
+    def visit_integer(self, integer_type: IntegerType) -> AvroType:
+        return "int"
+
+    def visit_long(self, long_type: LongType) -> AvroType:
+        return "long"
+
+    def visit_float(self, float_type: FloatType) -> AvroType:
+        return "float"
+
+    def visit_double(self, double_type: DoubleType) -> AvroType:
+        return "double"
+
+    def visit_date(self, date_type: DateType) -> AvroType:
+        return {"type": "int", "logicalType": "date"}
+
+    def visit_time(self, time_type: TimeType) -> AvroType:
+        return {"type": "long", "logicalType": "time-micros"}
+
+    def visit_timestamp(self, timestamp_type: TimestampType) -> AvroType:
+        # Iceberg only supports microsecond precision
+        return {"type": "long", "logicalType": "timestamp-micros", "adjust-to-utc": False}
+
+    def visit_timestamptz(self, timestamptz_type: TimestamptzType) -> AvroType:
+        # Iceberg only supports microsecond precision
+        return {"type": "long", "logicalType": "timestamp-micros", "adjust-to-utc": True}
+
+    def visit_string(self, string_type: StringType) -> AvroType:
+        return "string"
+
+    def visit_uuid(self, uuid_type: UUIDType) -> AvroType:
+        return {"type": "fixed", "size": 16, "logicalType": "uuid"}
+
+    def visit_binary(self, binary_type: BinaryType) -> AvroType:
+        return "bytes"
diff --git a/pyiceberg/utils/singleton.py b/pyiceberg/utils/singleton.py
new file mode 100644
index 0000000000..9380b89cbc
--- /dev/null
+++ b/pyiceberg/utils/singleton.py
@@ -0,0 +1,48 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+This is a singleton base class that can be used to cache and re-use existing objects.
+
+In the Iceberg codebase we have a lot of objects that are stateless (for example Types such as StringType,
+BooleanType etc). FixedTypes have arguments (e.g. Fixed[22]) that we also make part of the key when caching
+the newly created object.
+
+The Singleton base class overrides `__new__`, which is evaluated with all the arguments before the instance
+is initialized. If we already created an instance with the same arguments earlier, we'll just return it. The
+same pattern can also be implemented with a metaclass that intercepts `__call__`.
+ +More information on metaclasses: https://docs.python.org/3/reference/datamodel.html#metaclasses +""" +from typing import Any, ClassVar, Dict + + +def _convert_to_hashable_type(element: Any) -> Any: + if isinstance(element, dict): + return tuple((_convert_to_hashable_type(k), _convert_to_hashable_type(v)) for k, v in element.items()) + elif isinstance(element, list): + return tuple(map(_convert_to_hashable_type, element)) + return element + + +class Singleton: + _instances: ClassVar[Dict] = {} # type: ignore + + def __new__(cls, *args, **kwargs): # type: ignore + key = (cls, tuple(args), _convert_to_hashable_type(kwargs)) + if key not in cls._instances: + cls._instances[key] = super().__new__(cls) + return cls._instances[key] diff --git a/pyiceberg/utils/truncate.py b/pyiceberg/utils/truncate.py new file mode 100644 index 0000000000..4ddb2401c4 --- /dev/null +++ b/pyiceberg/utils/truncate.py @@ -0,0 +1,48 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from typing import Optional + + +def truncate_upper_bound_text_string(value: str, trunc_length: Optional[int]) -> Optional[str]: + result = value[:trunc_length] + if result != value: + chars = [*result] + + for i in range(-1, -len(result) - 1, -1): + try: + to_inc = ord(chars[i]) + # will raise exception if the highest unicode code is reached + _next = chr(to_inc + 1) + chars[i] = _next + return "".join(chars) + except ValueError: + pass + return None # didn't find a valid upper bound + return result + + +def truncate_upper_bound_binary_string(value: bytes, trunc_length: Optional[int]) -> Optional[bytes]: + result = value[:trunc_length] + if result != value: + _bytes = [*result] + for i in range(-1, -len(result) - 1, -1): + if _bytes[i] < 255: + _bytes[i] += 1 + return b"".join([i.to_bytes(1, byteorder="little") for i in _bytes]) + return None + + return result diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..c847c8b312 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,354 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
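+# Illustrative note on the extras declared below: each key under
+# [tool.poetry.extras] is installable as a pip extra, e.g.
+# `pip install "pyiceberg[s3fs,hive]"` pulls in s3fs and thrift.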
+[tool.poetry] +name = "pyiceberg" +version = "0.5.0" +readme = "README.md" +homepage = "https://py.iceberg.apache.org/" +repository = "https://github.com/apache/iceberg/" +description = "Apache Iceberg is an open table format for huge analytic datasets" +authors = ["Apache Software Foundation "] +license = "Apache License 2.0" +classifiers = [ + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11" +] +packages = [ + { include = "pyiceberg" }, + { from = "vendor", include = "fb303" }, + { from = "vendor", include = "hive_metastore" }, + { include = "tests", format = "sdist" }, + { include = "Makefile", format = "sdist" }, + { include = "NOTICE", format = ["sdist", "wheel"] } +] +include = [ + { path = "dev", format = "sdist" }, + { path = "pyiceberg/**/*.so", format = "wheel" }, + { path = "pyiceberg/**/*.pyd", format = "wheel" }, +] + +[tool.poetry.dependencies] +python = "^3.8" +mmhash3 = ">=3.0.0,<4.0.0" +requests = ">=2.20.0,<3.0.0" +click = ">=7.1.1,<9.0.0" +rich = ">=10.11.0,<14.0.0" +strictyaml = ">=1.7.0,<2.0.0" # CVE-2020-14343 was fixed in 5.4. +pydantic = ">=2.0,<3.0" +sortedcontainers = "2.4.0" +fsspec = ">=2023.1.0,<2024.1.0" +pyparsing = ">=3.1.0,<4.0.0" +zstandard = ">=0.13.0,<1.0.0" +pyarrow = { version = ">=9.0.0,<14.0.0", optional = true } +pandas = { version = ">=1.0.0,<3.0.0", optional = true } +duckdb = { version = ">=0.5.0,<1.0.0", optional = true } +ray = { version = ">=2.0.0,<3.0.0", optional = true } +python-snappy = { version = ">=0.6.0,<1.0.0", optional = true } +thrift = { version = ">=0.13.0,<1.0.0", optional = true } +mypy-boto3-glue = { version = ">=1.28.18", optional = true } +boto3 = { version = ">=1.24.59", optional = true } +s3fs = { version = ">=2023.1.0,<2024.1.0", optional = true } +adlfs = { version = ">=2023.1.0,<2024.1.0", optional = true } +gcsfs = { version = ">=2023.1.0,<2024.1.0", optional = true } +psycopg2-binary = { version = ">=2.9.6", optional = true } +sqlalchemy = { version = "^2.0.18", optional = true } + +[tool.poetry.dev-dependencies] +pytest = "7.4.2" +pytest-checkdocs = "2.10.1" +pre-commit = "3.4.0" +fastavro = "1.8.3" +coverage = { version = "^7.3.1", extras = ["toml"] } +requests-mock = "1.11.0" +moto = "^4.2.4" +typing-extensions = "4.7.1" +pytest-mock = "3.11.1" +cython = "3.0.2" + +[[tool.mypy.overrides]] +module = "pytest_mock.*" +ignore_missing_imports = true + +[tool.poetry.scripts] +pyiceberg = "pyiceberg.cli.console:run" + +[build-system] +requires = ["poetry-core>=1.0.0", "wheel", "Cython>=3.0.0", "setuptools"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry.build] +generate-setup-file = false +script = "build-module.py" + +[tool.poetry.extras] +pyarrow = ["pyarrow"] +pandas = ["pandas", "pyarrow"] +duckdb = ["duckdb", "pyarrow"] +ray = ["ray", "pyarrow", "pandas"] +snappy = ["python-snappy"] +hive = ["thrift"] +s3fs = ["s3fs"] +glue = ["boto3", "mypy-boto3-glue"] +adlfs = ["adlfs"] +dynamodb = ["boto3"] +zstandard = ["zstandard"] +sql-postgres = ["sqlalchemy", "psycopg2-binary"] +gcsfs = ["gcsfs"] + +[tool.pytest.ini_options] +markers = [ + "unmarked: marks a test as a unittest", + "s3: marks a test as requiring access to s3 compliant storage (use with --aws-access-key-id, --aws-secret-access-key, and --endpoint args)", + "adlfs: marks a test as requiring access to adlfs compliant storage (use with 
--adlfs.account-name, --adlfs.account-key, and --adlfs.endpoint args)", + "integration: marks integration tests against Apache Spark", + "gcs: marks a test as requiring access to gcs compliant storage (use with --gs.token, --gs.project, and --gs.endpoint)" +] + +[tool.black] +line-length = 130 +target-version = ['py38'] + +[tool.pycln] +all = true + +[tool.mypy] +mypy_path = "python" +no_implicit_optional = true +namespace_packages = false +warn_redundant_casts = true +warn_unreachable = true +warn_unused_ignores = true +disallow_any_generics = true +disallow_untyped_defs = true + +[[tool.mypy.overrides]] +module = "pyarrow.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "pandas.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "snappy.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "zstandard.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "pydantic.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "pydantic_core.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "pytest.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "fastavro.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "mmh3.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "hive_metastore.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "thrift.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "requests_mock.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "click.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "rich.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "fsspec.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "s3fs.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "azure.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "adlfs.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "gcsfs.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "packaging.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "tests.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "boto3" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "botocore.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "mypy_boto3_glue.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "moto" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "aiobotocore.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "aiohttp.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "duckdb.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "ray.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "pyparsing.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "pyspark.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "strictyaml.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "sortedcontainers.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "numpy.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "sqlalchemy.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "Cython.*" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "setuptools.*" 
+ignore_missing_imports = true
+
+[tool.coverage.run]
+source = ['pyiceberg/']
+
+[tool.ruff]
+src = ['pyiceberg', 'tests']
+extend-exclude = ["dev/provision.py"]
+# Enable the pycodestyle (`E`) and Pyflakes (`F`) rules by default.
+# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or
+# McCabe complexity (`C901`) by default.
+select = [
+  "E",  # pycodestyle
+  "W",  # pycodestyle
+  "F",  # Pyflakes
+  "B",  # flake8-bugbear
+  "C4", # flake8-comprehensions
+  "I",  # isort
+  "UP", # pyupgrade
+]
+ignore = ["E501", "E203", "B024", "B028"]
+
+# Allow autofix for all enabled rules (when `--fix` is provided).
+fixable = ["ALL"]
+unfixable = []
+
+# Exclude a variety of commonly ignored directories.
+exclude = [
+  ".bzr",
+  ".direnv",
+  ".eggs",
+  ".git",
+  ".git-rewrite",
+  ".hg",
+  ".mypy_cache",
+  ".nox",
+  ".pants.d",
+  ".pytype",
+  ".ruff_cache",
+  ".svn",
+  ".tox",
+  ".venv",
+  "__pypackages__",
+  "_build",
+  "buck-out",
+  "build",
+  "dist",
+  "node_modules",
+  "venv",
+]
+per-file-ignores = {}
+
+# Same as Black.
+line-length = 130
+
+# Allow unused variables when underscore-prefixed.
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+
+[tool.ruff.pyupgrade]
+# Preserve types, even if a file imports `from __future__ import annotations`.
+keep-runtime-typing = true
+
+[tool.ruff.isort]
+detect-same-package = true
+lines-between-types = 0
+known-first-party = ["pyiceberg", "tests"]
+section-order = ["future", "standard-library", "third-party", "first-party", "local-folder"]
diff --git a/tests/avro/test_decoder.py b/tests/avro/test_decoder.py
new file mode 100644
index 0000000000..fd660247cd
--- /dev/null
+++ b/tests/avro/test_decoder.py
@@ -0,0 +1,207 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
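+# A note on the wire format exercised below: Avro writes ints and longs as a
+# zigzag-encoded value in a little-endian base-128 varint. Zigzag maps signed
+# to unsigned (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...) so small magnitudes stay
+# small: 12 becomes 24, a single byte 0x18, which is why b"\x18" decodes to 12.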
+from __future__ import annotations + +import itertools +import struct +from io import SEEK_SET +from types import TracebackType +from typing import Callable, Optional, Type +from unittest.mock import MagicMock, patch + +import pytest + +from pyiceberg.avro.decoder import BinaryDecoder, StreamingBinaryDecoder, new_decoder +from pyiceberg.avro.decoder_fast import CythonBinaryDecoder +from pyiceberg.avro.resolver import resolve +from pyiceberg.io import InputStream +from pyiceberg.types import DoubleType, FloatType + +AVAILABLE_DECODERS = [StreamingBinaryDecoder, CythonBinaryDecoder] + + +@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS) +def test_read_boolean_true(decoder_class: Callable[[bytes], BinaryDecoder]) -> None: + decoder = decoder_class(b"\x01") + assert decoder.read_boolean() is True + + +@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS) +def test_read_boolean_false(decoder_class: Callable[[bytes], BinaryDecoder]) -> None: + decoder = decoder_class(b"\x00") + assert decoder.read_boolean() is False + + +@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS) +def test_skip_boolean(decoder_class: Callable[[bytes], BinaryDecoder]) -> None: + decoder = decoder_class(b"\x00") + assert decoder.tell() == 0 + decoder.skip_boolean() + assert decoder.tell() == 1 + + +@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS) +def test_read_int(decoder_class: Callable[[bytes], BinaryDecoder]) -> None: + decoder = decoder_class(b"\x18") + assert decoder.read_int() == 12 + + +@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS) +def test_read_int_longer(decoder_class: Callable[[bytes], BinaryDecoder]) -> None: + decoder = decoder_class(b"\x8e\xd1\x87\x01") + assert decoder.read_int() == 1111111 + + +def zigzag_encode(datum: int) -> bytes: + result = [] + datum = (datum << 1) ^ (datum >> 63) + while (datum & ~0x7F) != 0: + result.append(struct.pack("B", (datum & 0x7F) | 0x80)) + datum >>= 7 + result.append(struct.pack("B", datum)) + return b"".join(result) + + +@pytest.mark.parametrize( + "decoder_class, expected_value", + list(itertools.product(AVAILABLE_DECODERS, [0, -1, 2**32, -(2**32), (2**63 - 1), -(2**63)])), +) +def test_read_int_custom_encode(decoder_class: Callable[[bytes], BinaryDecoder], expected_value: int) -> None: + encoded = zigzag_encode(expected_value) + decoder = decoder_class(encoded) + decoded = decoder.read_int() + assert decoded == expected_value, f"Decoded value does not match decoded={decoded} expected={expected_value}" + + +@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS) +def test_skip_int(decoder_class: Callable[[bytes], BinaryDecoder]) -> None: + decoder = decoder_class(b"\x18") + assert decoder.tell() == 0 + decoder.skip_int() + assert decoder.tell() == 1 + + +@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS) +def test_read_negative_bytes(decoder_class: Callable[[bytes], BinaryDecoder]) -> None: + decoder = decoder_class(b"") + + with pytest.raises(ValueError) as exc_info: + decoder.read(-1) + + assert "Requested -1 bytes to read, expected positive integer." 
in str(exc_info.value)
+
+
+class OneByteAtATimeInputStream(InputStream):
+    """
+    Fake input stream that returns a single byte at a time.
+    """
+
+    pos = 0
+
+    def read(self, size: int = 0) -> bytes:
+        self.pos += 1
+        return self.pos.to_bytes(1, byteorder="little")
+
+    def seek(self, offset: int, whence: int = SEEK_SET) -> int:
+        self.pos = offset
+        return self.pos
+
+    def tell(self) -> int:
+        return self.pos
+
+    def close(self) -> None:
+        pass
+
+    def __enter__(self) -> OneByteAtATimeInputStream:
+        return self
+
+    def __exit__(
+        self, exctype: Optional[Type[BaseException]], excinst: Optional[BaseException], exctb: Optional[TracebackType]
+    ) -> None:
+        self.close()
+
+
+# The in-memory CythonBinaryDecoder doesn't support reading a byte at a time
+@pytest.mark.parametrize("decoder_class", [StreamingBinaryDecoder])
+def test_read_single_byte_at_the_time(decoder_class: Callable[[bytes], BinaryDecoder]) -> None:
+    decoder = decoder_class(OneByteAtATimeInputStream())  # type: ignore
+    assert decoder.read(2) == b"\x01\x02"
+
+
+@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS)
+def test_read_float(decoder_class: Callable[[bytes], BinaryDecoder]) -> None:
+    decoder = decoder_class(b"\x00\x00\x9A\x41")
+    assert decoder.read_float() == 19.25
+
+
+@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS)
+def test_skip_float(decoder_class: Callable[[bytes], BinaryDecoder]) -> None:
+    decoder = decoder_class(b"\x00\x00\x9A\x41")
+    assert decoder.tell() == 0
+    decoder.skip_float()
+    assert decoder.tell() == 4
+
+
+@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS)
+def test_read_double(decoder_class: Callable[[bytes], BinaryDecoder]) -> None:
+    decoder = decoder_class(b"\x00\x00\x00\x00\x00\x40\x33\x40")
+    assert decoder.read_double() == 19.25
+
+
+@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS)
+def test_skip_double(decoder_class: Callable[[bytes], BinaryDecoder]) -> None:
+    decoder = decoder_class(b"\x00\x00\x00\x00\x00\x40\x33\x40")
+    assert decoder.tell() == 0
+    decoder.skip_double()
+    assert decoder.tell() == 8
+
+
+@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS)
+def test_read_bytes(decoder_class: Callable[[bytes], BinaryDecoder]) -> None:
+    decoder = decoder_class(b"\x08\x01\x02\x03\x04")
+    actual = decoder.read_bytes()
+    assert actual == b"\x01\x02\x03\x04"
+
+
+@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS)
+def test_read_utf8(decoder_class: Callable[[bytes], BinaryDecoder]) -> None:
+    decoder = decoder_class(b"\x04\x76\x6F")
+    assert decoder.read_utf8() == "vo"
+
+
+@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS)
+def test_skip_utf8(decoder_class: Callable[[bytes], BinaryDecoder]) -> None:
+    decoder = decoder_class(b"\x04\x76\x6F")
+    assert decoder.tell() == 0
+    decoder.skip_utf8()
+    assert decoder.tell() == 3
+
+
+@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS)
+def test_read_int_as_float(decoder_class: Callable[[bytes], BinaryDecoder]) -> None:
+    decoder = decoder_class(b"\x00\x00\x9A\x41")
+    reader = resolve(FloatType(), DoubleType())
+    assert reader.read(decoder) == 19.25
+
+
+@patch("pyiceberg.avro.decoder_fast.CythonBinaryDecoder")
+def test_fallback_to_pure_python_decoder(cython_decoder: MagicMock) -> None:
+    cython_decoder.side_effect = ModuleNotFoundError
+
+    with pytest.warns(UserWarning, match="Falling back to pure Python Avro decoder, missing Cython implementation"):
+        dec = new_decoder(b"")
+    assert isinstance(dec, StreamingBinaryDecoder)
diff --git a/tests/avro/test_encoder.py b/tests/avro/test_encoder.py
new file mode 100644
index 0000000000..5866719434
--- /dev/null
+++ b/tests/avro/test_encoder.py
@@ -0,0 +1,134 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from __future__ import annotations
+
+import io
+import struct
+import uuid
+
+from pyiceberg.avro.encoder import BinaryEncoder
+
+
+def test_write() -> None:
+    output = io.BytesIO()
+    encoder = BinaryEncoder(output)
+
+    _input = b"\x12\x34\x56"
+
+    encoder.write(_input)
+
+    assert output.getbuffer() == _input
+
+
+def test_write_boolean() -> None:
+    output = io.BytesIO()
+    encoder = BinaryEncoder(output)
+
+    encoder.write_boolean(True)
+    encoder.write_boolean(False)
+
+    assert output.getbuffer() == struct.pack("??", True, False)
+
+
+def test_write_int() -> None:
+    output = io.BytesIO()
+    encoder = BinaryEncoder(output)
+
+    _1byte_input = 2
+    _2byte_input = 7466
+    _3byte_input = 523490
+    _4byte_input = 86561570
+    _5byte_input = 2510416930
+    _6byte_input = 734929016866
+    _7byte_input = 135081528772642
+    _8byte_input = 35124861473277986
+
+    encoder.write_int(_1byte_input)
+    encoder.write_int(_2byte_input)
+    encoder.write_int(_3byte_input)
+    encoder.write_int(_4byte_input)
+    encoder.write_int(_5byte_input)
+    encoder.write_int(_6byte_input)
+    encoder.write_int(_7byte_input)
+    encoder.write_int(_8byte_input)
+
+    buffer = output.getbuffer()
+
+    assert buffer[0:1] == b"\x04"
+    assert buffer[1:3] == b"\xd4\x74"
+    assert buffer[3:6] == b"\xc4\xf3\x3f"
+    assert buffer[6:10] == b"\xc4\xcc\xc6\x52"
+    assert buffer[10:15] == b"\xc4\xb0\x8f\xda\x12"
+    assert buffer[15:21] == b"\xc4\xe0\xf6\xd2\xe3\x2a"
+    assert buffer[21:28] == b"\xc4\xa0\xce\xe8\xe3\xb6\x3d"
+    assert buffer[28:36] == b"\xc4\xa0\xb2\xae\x83\xf8\xe4\x7c"
+
+
+def test_write_float() -> None:
+    output = io.BytesIO()
+    encoder = BinaryEncoder(output)
+
+    _input = 3.14159265359
+
+    encoder.write_float(_input)
+
+    assert output.getbuffer() == struct.pack("<f", _input)
+
+
+def test_write_double() -> None:
+    output = io.BytesIO()
+    encoder = BinaryEncoder(output)
+
+    _input = 3.14159265359
+
+    encoder.write_double(_input)
+
+    assert output.getbuffer() == struct.pack("<d", _input)
+
+
+def test_write_bytes() -> None:
+    output = io.BytesIO()
+    encoder = BinaryEncoder(output)
+
+    _input = b"\x12\x34\x56"
+
+    encoder.write_bytes(_input)
+
+    assert output.getbuffer() == b"".join([b"\x06", _input])
+
+
+def test_write_utf8() -> None:
+    output = io.BytesIO()
+    encoder = BinaryEncoder(output)
+
+    _input = "That, my liege, is how we know the Earth to be banana-shaped."
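+    # _input is 61 characters, so the Avro length prefix is zigzag(61) = 122 = 0x7a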
+    bin_input = _input.encode()
+    encoder.write_utf8(_input)
+
+    assert output.getbuffer() == b"".join([b"\x7a", bin_input])
+
+
+def test_write_uuid() -> None:
+    output = io.BytesIO()
+    encoder = BinaryEncoder(output)
+
+    _input = uuid.UUID("12345678-1234-5678-1234-567812345678")
+    encoder.write_uuid(_input)
+
+    buf = output.getbuffer()
+    assert len(buf) == 16
+    assert buf.tobytes() == b"\x124Vx\x124Vx\x124Vx\x124Vx"
diff --git a/tests/avro/test_file.py b/tests/avro/test_file.py
new file mode 100644
index 0000000000..2738770492
--- /dev/null
+++ b/tests/avro/test_file.py
@@ -0,0 +1,289 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import inspect
+from datetime import date, datetime, time
+from enum import Enum
+from tempfile import TemporaryDirectory
+from typing import Any
+from uuid import UUID
+
+import pytest
+from _decimal import Decimal
+from fastavro import reader, writer
+
+import pyiceberg.avro.file as avro
+from pyiceberg.avro.codecs import DeflateCodec
+from pyiceberg.avro.file import META_SCHEMA, AvroFileHeader
+from pyiceberg.io.pyarrow import PyArrowFileIO
+from pyiceberg.manifest import (
+    MANIFEST_ENTRY_SCHEMA,
+    DataFile,
+    DataFileContent,
+    FileFormat,
+    ManifestEntry,
+    ManifestEntryStatus,
+)
+from pyiceberg.schema import Schema
+from pyiceberg.typedef import Record
+from pyiceberg.types import (
+    BooleanType,
+    DateType,
+    DecimalType,
+    DoubleType,
+    FixedType,
+    FloatType,
+    IntegerType,
+    LongType,
+    NestedField,
+    StringType,
+    TimestampType,
+    TimestamptzType,
+    TimeType,
+    UUIDType,
+)
+from pyiceberg.utils.schema_conversion import AvroSchemaConversion
+
+
+# Renamed from get_* to test_* so that pytest actually collects these two tests
+def test_get_deflate_compressor() -> None:
+    header = AvroFileHeader(struct=META_SCHEMA)
+    header[0] = bytes(0)
+    header[1] = {"avro.codec": "deflate"}
+    header[2] = bytes(16)
+    assert header.compression_codec() == DeflateCodec
+
+
+def test_get_null_compressor() -> None:
+    header = AvroFileHeader(struct=META_SCHEMA)
+    header[0] = bytes(0)
+    header[1] = {"avro.codec": "null"}
+    header[2] = bytes(16)
+    assert header.compression_codec() is None
+
+
+def test_unknown_codec() -> None:
+    header = AvroFileHeader(struct=META_SCHEMA)
+    header[0] = bytes(0)
+    header[1] = {"avro.codec": "unknown"}
+    header[2] = bytes(16)
+
+    with pytest.raises(ValueError) as exc_info:
+        header.compression_codec()
+
+    assert "Unsupported codec: unknown" in str(exc_info.value)
+
+
+def test_missing_schema() -> None:
+    header = AvroFileHeader(struct=META_SCHEMA)
+    header[0] = bytes(0)
+    header[1] = {}
+    header[2] = bytes(16)
+
+    with pytest.raises(ValueError) as exc_info:
+        header.get_schema()
+
+    assert "No schema found in Avro file headers" in str(exc_info.value)
+
+
+# helper function to serialize our objects to dicts to enable
+# direct comparison with the dicts returned by fastavro
+def todict(obj: Any)
-> Any: + if isinstance(obj, dict): + data = [] + for k, v in obj.items(): + data.append({"key": k, "value": v}) + return data + elif isinstance(obj, Enum): + return obj.value + elif hasattr(obj, "__iter__") and not isinstance(obj, str) and not isinstance(obj, bytes): + return [todict(v) for v in obj] + elif hasattr(obj, "__dict__"): + return {key: todict(value) for key, value in inspect.getmembers(obj) if not callable(value) and not key.startswith("_")} + else: + return obj + + +def test_write_manifest_entry_with_iceberg_read_with_fastavro() -> None: + data_file = DataFile( + content=DataFileContent.DATA, + file_path="s3://some-path/some-file.parquet", + file_format=FileFormat.PARQUET, + partition=Record(), + record_count=131327, + file_size_in_bytes=220669226, + column_sizes={1: 220661854}, + value_counts={1: 131327}, + null_value_counts={1: 0}, + nan_value_counts={}, + lower_bounds={1: b"aaaaaaaaaaaaaaaa"}, + upper_bounds={1: b"zzzzzzzzzzzzzzzz"}, + key_metadata=b"\xde\xad\xbe\xef", + split_offsets=[4, 133697593], + equality_ids=[], + sort_order_id=4, + spec_id=3, + ) + entry = ManifestEntry( + status=ManifestEntryStatus.ADDED, + snapshot_id=8638475580105682862, + data_sequence_number=0, + file_sequence_number=0, + data_file=data_file, + ) + + additional_metadata = {"foo": "bar"} + + with TemporaryDirectory() as tmpdir: + tmp_avro_file = tmpdir + "/manifest_entry.avro" + + with avro.AvroOutputFile[ManifestEntry]( + PyArrowFileIO().new_output(tmp_avro_file), MANIFEST_ENTRY_SCHEMA, "manifest_entry", additional_metadata + ) as out: + out.write_block([entry]) + + with open(tmp_avro_file, "rb") as fo: + r = reader(fo=fo) + + for k, v in additional_metadata.items(): + assert k in r.metadata + assert v == r.metadata[k] + + it = iter(r) + + fa_entry = next(it) + + assert todict(entry) == fa_entry + + +def test_write_manifest_entry_with_fastavro_read_with_iceberg() -> None: + data_file = DataFile( + content=DataFileContent.DATA, + file_path="s3://some-path/some-file.parquet", + file_format=FileFormat.PARQUET, + partition=Record(), + record_count=131327, + file_size_in_bytes=220669226, + column_sizes={1: 220661854}, + value_counts={1: 131327}, + null_value_counts={1: 0}, + nan_value_counts={}, + lower_bounds={1: b"aaaaaaaaaaaaaaaa"}, + upper_bounds={1: b"zzzzzzzzzzzzzzzz"}, + key_metadata=b"\xde\xad\xbe\xef", + split_offsets=[4, 133697593], + equality_ids=[], + sort_order_id=4, + spec_id=3, + ) + entry = ManifestEntry( + status=ManifestEntryStatus.ADDED, + snapshot_id=8638475580105682862, + data_sequence_number=0, + file_sequence_number=0, + data_file=data_file, + ) + + with TemporaryDirectory() as tmpdir: + tmp_avro_file = tmpdir + "/manifest_entry.avro" + + schema = AvroSchemaConversion().iceberg_to_avro(MANIFEST_ENTRY_SCHEMA, schema_name="manifest_entry") + + with open(tmp_avro_file, "wb") as out: + writer(out, schema, [todict(entry)]) + + with avro.AvroFile[ManifestEntry]( + PyArrowFileIO().new_input(tmp_avro_file), + MANIFEST_ENTRY_SCHEMA, + {-1: ManifestEntry, 2: DataFile}, + ) as avro_reader: + it = iter(avro_reader) + avro_entry = next(it) + + assert entry == avro_entry + + +@pytest.mark.parametrize("is_required", [True, False]) +def test_all_primitive_types(is_required: bool) -> None: + all_primitives_schema = Schema( + NestedField(field_id=1, name="field_fixed", field_type=FixedType(16), required=is_required), + NestedField(field_id=2, name="field_decimal", field_type=DecimalType(6, 2), required=is_required), + NestedField(field_id=3, name="field_bool", field_type=BooleanType(), 
required=is_required), + NestedField(field_id=4, name="field_int", field_type=IntegerType(), required=True), + NestedField(field_id=5, name="field_long", field_type=LongType(), required=is_required), + NestedField(field_id=6, name="field_float", field_type=FloatType(), required=is_required), + NestedField(field_id=7, name="field_double", field_type=DoubleType(), required=is_required), + NestedField(field_id=8, name="field_date", field_type=DateType(), required=is_required), + NestedField(field_id=9, name="field_time", field_type=TimeType(), required=is_required), + NestedField(field_id=10, name="field_timestamp", field_type=TimestampType(), required=is_required), + NestedField(field_id=11, name="field_timestamptz", field_type=TimestamptzType(), required=is_required), + NestedField(field_id=12, name="field_string", field_type=StringType(), required=is_required), + NestedField(field_id=13, name="field_uuid", field_type=UUIDType(), required=is_required), + schema_id=1, + ) + + class AllPrimitivesRecord(Record): + field_fixed: bytes + field_decimal: Decimal + field_bool: bool + field_int: int + field_long: int + field_float: float + field_double: float + field_date: date + field_time: time + field_timestamp: datetime + field_timestamptz: datetime + field_string: str + field_uuid: UUID + + def __init__(self, *data: Any, **named_data: Any) -> None: + super().__init__(*data, **{"struct": all_primitives_schema.as_struct(), **named_data}) + + record = AllPrimitivesRecord( + b"\x124Vx\x124Vx\x124Vx\x124Vx", + Decimal("123.45"), + True, + 123, + 429496729622, + 123.22000122070312, + 429496729622.314, + 19052, + 69922000000, + 1677629965000000, + 1677629965000000, + "this is a sentence", + UUID("12345678-1234-5678-1234-567812345678"), + ) + + with TemporaryDirectory() as tmpdir: + tmp_avro_file = tmpdir + "/all_primitives.avro" + # write to disk + with avro.AvroOutputFile[AllPrimitivesRecord]( + PyArrowFileIO().new_output(tmp_avro_file), all_primitives_schema, "all_primitives_schema" + ) as out: + out.write_block([record]) + + # read from disk + with avro.AvroFile[AllPrimitivesRecord]( + PyArrowFileIO().new_input(tmp_avro_file), + all_primitives_schema, + {-1: AllPrimitivesRecord}, + ) as avro_reader: + it = iter(avro_reader) + avro_entry = next(it) + + for idx, field in enumerate(all_primitives_schema.as_struct()): + assert record[idx] == avro_entry[idx], f"Invalid {field}" diff --git a/tests/avro/test_reader.py b/tests/avro/test_reader.py new file mode 100644 index 0000000000..a3a502bcff --- /dev/null +++ b/tests/avro/test_reader.py @@ -0,0 +1,385 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
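+# These tests check two things: that AvroFile surfaces the file header (the
+# magic bytes b"Obj\x01" plus the embedded JSON schema in the metadata map),
+# and that construct_reader maps each Iceberg type to its matching reader.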
+# pylint:disable=protected-access +import json +from typing import Callable + +import pytest + +from pyiceberg.avro.decoder import BinaryDecoder, StreamingBinaryDecoder +from pyiceberg.avro.decoder_fast import CythonBinaryDecoder +from pyiceberg.avro.file import AvroFile +from pyiceberg.avro.reader import ( + BinaryReader, + BooleanReader, + DateReader, + DecimalReader, + DoubleReader, + FixedReader, + FloatReader, + IntegerReader, + StringReader, + StructReader, + TimeReader, + TimestampReader, + TimestamptzReader, + UUIDReader, +) +from pyiceberg.avro.resolver import construct_reader +from pyiceberg.io.pyarrow import PyArrowFileIO +from pyiceberg.manifest import MANIFEST_ENTRY_SCHEMA, DataFile, ManifestEntry +from pyiceberg.schema import Schema +from pyiceberg.typedef import Record +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IntegerType, + LongType, + NestedField, + PrimitiveType, + StringType, + StructType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) + +AVAILABLE_DECODERS = [StreamingBinaryDecoder, CythonBinaryDecoder] + + +def test_read_header(generated_manifest_entry_file: str, iceberg_manifest_entry_schema: Schema) -> None: + with AvroFile[ManifestEntry]( + PyArrowFileIO().new_input(generated_manifest_entry_file), + MANIFEST_ENTRY_SCHEMA, + {-1: ManifestEntry, 2: DataFile}, + ) as reader: + header = reader.header + + assert header.magic == b"Obj\x01" + assert json.loads(header.meta["avro.schema"]) == { + "type": "record", + "name": "manifest_entry", + "fields": [ + {"field-id": 0, "name": "status", "type": "int"}, + {"field-id": 1, "default": None, "name": "snapshot_id", "type": ["null", "long"]}, + { + "field-id": 2, + "name": "data_file", + "type": { + "type": "record", + "name": "r2", + "fields": [ + {"field-id": 100, "doc": "Location URI with FS scheme", "name": "file_path", "type": "string"}, + { + "field-id": 101, + "doc": "File format name: avro, orc, or parquet", + "name": "file_format", + "type": "string", + }, + { + "field-id": 102, + "name": "partition", + "type": { + "type": "record", + "name": "r102", + "fields": [ + {"field-id": 1000, "default": None, "name": "VendorID", "type": ["null", "int"]}, + { + "field-id": 1001, + "default": None, + "name": "tpep_pickup_datetime", + "type": ["null", {"type": "int", "logicalType": "date"}], + }, + ], + }, + }, + {"field-id": 103, "doc": "Number of records in the file", "name": "record_count", "type": "long"}, + {"field-id": 104, "doc": "Total file size in bytes", "name": "file_size_in_bytes", "type": "long"}, + {"field-id": 105, "name": "block_size_in_bytes", "type": "long"}, + { + "field-id": 108, + "doc": "Map of column id to total size on disk", + "default": None, + "name": "column_sizes", + "type": [ + "null", + { + "logicalType": "map", + "type": "array", + "items": { + "type": "record", + "name": "k117_v118", + "fields": [ + {"field-id": 117, "name": "key", "type": "int"}, + {"field-id": 118, "name": "value", "type": "long"}, + ], + }, + }, + ], + }, + { + "field-id": 109, + "doc": "Map of column id to total count, including null and NaN", + "default": None, + "name": "value_counts", + "type": [ + "null", + { + "logicalType": "map", + "type": "array", + "items": { + "type": "record", + "name": "k119_v120", + "fields": [ + {"field-id": 119, "name": "key", "type": "int"}, + {"field-id": 120, "name": "value", "type": "long"}, + ], + }, + }, + ], + }, + { + "field-id": 110, + "doc": "Map of column id to null value count", + 
"default": None, + "name": "null_value_counts", + "type": [ + "null", + { + "logicalType": "map", + "type": "array", + "items": { + "type": "record", + "name": "k121_v122", + "fields": [ + {"field-id": 121, "name": "key", "type": "int"}, + {"field-id": 122, "name": "value", "type": "long"}, + ], + }, + }, + ], + }, + { + "field-id": 137, + "doc": "Map of column id to number of NaN values in the column", + "default": None, + "name": "nan_value_counts", + "type": [ + "null", + { + "logicalType": "map", + "type": "array", + "items": { + "type": "record", + "name": "k138_v139", + "fields": [ + {"field-id": 138, "name": "key", "type": "int"}, + {"field-id": 139, "name": "value", "type": "long"}, + ], + }, + }, + ], + }, + { + "field-id": 125, + "doc": "Map of column id to lower bound", + "default": None, + "name": "lower_bounds", + "type": [ + "null", + { + "logicalType": "map", + "type": "array", + "items": { + "type": "record", + "name": "k126_v127", + "fields": [ + {"field-id": 126, "name": "key", "type": "int"}, + {"field-id": 127, "name": "value", "type": "bytes"}, + ], + }, + }, + ], + }, + { + "field-id": 128, + "doc": "Map of column id to upper bound", + "default": None, + "name": "upper_bounds", + "type": [ + "null", + { + "logicalType": "map", + "type": "array", + "items": { + "type": "record", + "name": "k129_v130", + "fields": [ + {"field-id": 129, "name": "key", "type": "int"}, + {"field-id": 130, "name": "value", "type": "bytes"}, + ], + }, + }, + ], + }, + { + "field-id": 131, + "doc": "Encryption key metadata blob", + "default": None, + "name": "key_metadata", + "type": ["null", "bytes"], + }, + { + "field-id": 132, + "doc": "Splittable offsets", + "default": None, + "name": "split_offsets", + "type": ["null", {"element-id": 133, "type": "array", "items": "long"}], + }, + { + "field-id": 140, + "doc": "Sort order ID", + "default": None, + "name": "sort_order_id", + "type": ["null", "int"], + }, + ], + }, + }, + ], + } + + assert header.get_schema() == iceberg_manifest_entry_schema + + +def test_fixed_reader() -> None: + assert construct_reader(FixedType(22)) == FixedReader(22) + + +def test_decimal_reader() -> None: + assert construct_reader(DecimalType(19, 25)) == DecimalReader(19, 25) + + +def test_boolean_reader() -> None: + assert construct_reader(BooleanType()) == BooleanReader() + + +def test_integer_reader() -> None: + assert construct_reader(IntegerType()) == IntegerReader() + + +def test_long_reader() -> None: + assert construct_reader(LongType()) == IntegerReader() + + +def test_float_reader() -> None: + assert construct_reader(FloatType()) == FloatReader() + + +def test_double_reader() -> None: + assert construct_reader(DoubleType()) == DoubleReader() + + +def test_date_reader() -> None: + assert construct_reader(DateType()) == DateReader() + + +def test_time_reader() -> None: + assert construct_reader(TimeType()) == TimeReader() + + +def test_timestamp_reader() -> None: + assert construct_reader(TimestampType()) == TimestampReader() + + +def test_timestamptz_reader() -> None: + assert construct_reader(TimestamptzType()) == TimestamptzReader() + + +def test_string_reader() -> None: + assert construct_reader(StringType()) == StringReader() + + +def test_binary_reader() -> None: + assert construct_reader(BinaryType()) == BinaryReader() + + +def test_unknown_type() -> None: + class UnknownType(PrimitiveType): + root: str = "UnknownType" + + with pytest.raises(ValueError) as exc_info: + construct_reader(UnknownType()) + + assert "Unknown type:" in str(exc_info.value) + + 
+def test_uuid_reader() -> None: + assert construct_reader(UUIDType()) == UUIDReader() + + +@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS) +def test_read_struct(decoder_class: Callable[[bytes], BinaryDecoder]) -> None: + decoder = decoder_class(b"\x18") + struct = StructType(NestedField(1, "id", IntegerType(), required=True)) + result = StructReader(((0, IntegerReader()),), Record, struct).read(decoder) + assert repr(result) == "Record[id=12]" + + +@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS) +def test_read_struct_lambda(decoder_class: Callable[[bytes], BinaryDecoder]) -> None: + decoder = decoder_class(b"\x18") + + struct = StructType(NestedField(1, "id", IntegerType(), required=True)) + # You can also pass in an arbitrary function that returns a struct + result = StructReader( + ((0, IntegerReader()),), lambda struct: Record(struct=struct), struct # pylint: disable=unnecessary-lambda + ).read(decoder) + assert repr(result) == "Record[id=12]" + + +@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS) +def test_read_not_struct_type(decoder_class: Callable[[bytes], BinaryDecoder]) -> None: + decoder = decoder_class(b"\x18") + + struct = StructType(NestedField(1, "id", IntegerType(), required=True)) + with pytest.raises(ValueError) as exc_info: + _ = StructReader(((0, IntegerReader()),), str, struct).read(decoder) # type: ignore + + assert "Incompatible with StructProtocol: " in str(exc_info.value) + + +@pytest.mark.parametrize("decoder_class", AVAILABLE_DECODERS) +def test_read_struct_exception_handling(decoder_class: Callable[[bytes], BinaryDecoder]) -> None: + decoder = decoder_class(b"\x18") + + def raise_err(struct: StructType) -> None: + raise TypeError("boom") + + struct = StructType(NestedField(1, "id", IntegerType(), required=True)) + # You can also pass in an arbitrary function that returns a struct + + with pytest.raises(ValueError) as exc_info: + _ = StructReader(((0, IntegerReader()),), raise_err, struct).read(decoder) # type: ignore + + assert "Unable to initialize struct:" in str(exc_info.value) diff --git a/tests/avro/test_resolver.py b/tests/avro/test_resolver.py new file mode 100644 index 0000000000..a302294755 --- /dev/null +++ b/tests/avro/test_resolver.py @@ -0,0 +1,303 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
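+# Schema resolution matches the file ("write") schema to the requested
+# ("read") schema by field ID rather than by name or position. resolve()
+# returns a tree of readers that skips fields absent from the read schema,
+# reorders struct fields, and applies the type promotions the Iceberg spec
+# allows (for example int -> long and float -> double).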
+ +from tempfile import TemporaryDirectory +from typing import Optional + +import pytest +from pydantic import Field + +from pyiceberg.avro.file import AvroFile +from pyiceberg.avro.reader import ( + DecimalReader, + DefaultReader, + DoubleReader, + FloatReader, + IntegerReader, + MapReader, + StringReader, + StructReader, +) +from pyiceberg.avro.resolver import ResolveError, resolve +from pyiceberg.io.pyarrow import PyArrowFileIO +from pyiceberg.schema import Schema +from pyiceberg.typedef import Record +from pyiceberg.types import ( + BinaryType, + DecimalType, + DoubleType, + FloatType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + StringType, + StructType, +) + + +def test_resolver() -> None: + write_schema = Schema( + NestedField(1, "id", LongType()), + NestedField(2, "data", StringType()), + NestedField( + 3, + "location", + StructType( + NestedField(4, "lat", DoubleType()), + NestedField(5, "long", DoubleType()), + ), + ), + NestedField(6, "preferences", MapType(7, StringType(), 8, StringType())), + schema_id=1, + ) + + location_struct = StructType( + NestedField(4, "lat", DoubleType()), + NestedField(5, "long", DoubleType()), + ) + read_schema = Schema( + NestedField( + 3, + "location", + location_struct, + ), + NestedField(1, "id", LongType()), + NestedField(6, "preferences", MapType(7, StringType(), 8, StringType())), + schema_id=1, + ) + read_tree = resolve(write_schema, read_schema) + + assert read_tree == StructReader( + ( + (1, IntegerReader()), + (None, StringReader()), + ( + 0, + StructReader( + ( + (0, DoubleReader()), + (1, DoubleReader()), + ), + Record, + location_struct, + ), + ), + (2, MapReader(StringReader(), StringReader())), + ), + Record, + read_schema.as_struct(), + ) + + +def test_resolver_new_required_field() -> None: + write_schema = Schema( + NestedField(1, "id", LongType()), + schema_id=1, + ) + read_schema = Schema( + NestedField(1, "id", LongType()), + NestedField(2, "data", StringType(), required=True), + schema_id=1, + ) + + with pytest.raises(ResolveError) as exc_info: + resolve(write_schema, read_schema) + + assert "2: data: required string is non-optional, and not part of the file schema" in str(exc_info.value) + + +def test_resolver_invalid_evolution() -> None: + write_schema = Schema( + NestedField(1, "id", LongType()), + schema_id=1, + ) + read_schema = Schema( + NestedField(1, "id", DoubleType()), + schema_id=1, + ) + + with pytest.raises(ResolveError) as exc_info: + resolve(write_schema, read_schema) + + assert "Cannot promote long to double" in str(exc_info.value) + + +def test_resolver_promotion_string_to_binary() -> None: + write_schema = Schema( + NestedField(1, "id", StringType()), + schema_id=1, + ) + read_schema = Schema( + NestedField(1, "id", BinaryType()), + schema_id=1, + ) + resolve(write_schema, read_schema) + + +def test_resolver_promotion_binary_to_string() -> None: + write_schema = Schema( + NestedField(1, "id", BinaryType()), + schema_id=1, + ) + read_schema = Schema( + NestedField(1, "id", StringType()), + schema_id=1, + ) + resolve(write_schema, read_schema) + + +def test_resolver_change_type() -> None: + write_schema = Schema( + NestedField(1, "properties", ListType(2, StringType())), + schema_id=1, + ) + read_schema = Schema( + NestedField(1, "properties", MapType(2, StringType(), 3, StringType())), + schema_id=1, + ) + + with pytest.raises(ResolveError) as exc_info: + resolve(write_schema, read_schema) + + assert "File/read schema are not aligned for list, got map" in str(exc_info.value) + + +def 
test_resolve_int_to_long() -> None:
+    assert resolve(IntegerType(), LongType()) == IntegerReader()
+
+
+def test_resolve_float_to_double() -> None:
+    # We should still read floats, because it is encoded in 4 bytes
+    assert resolve(FloatType(), DoubleType()) == FloatReader()
+
+
+def test_resolve_decimal_to_decimal() -> None:
+    # DecimalType(P, S) to DecimalType(P2, S) where P2 > P
+    assert resolve(DecimalType(19, 25), DecimalType(22, 25)) == DecimalReader(19, 25)
+
+
+def test_struct_not_aligned() -> None:
+    with pytest.raises(ResolveError):
+        assert resolve(StructType(), StringType())
+
+
+def test_map_not_aligned() -> None:
+    with pytest.raises(ResolveError):
+        assert resolve(MapType(1, StringType(), 2, IntegerType()), StringType())
+
+
+def test_primitive_not_aligned() -> None:
+    with pytest.raises(ResolveError):
+        assert resolve(IntegerType(), MapType(1, StringType(), 2, IntegerType()))
+
+
+def test_integer_not_aligned() -> None:
+    with pytest.raises(ResolveError):
+        assert resolve(IntegerType(), StringType())
+
+
+def test_float_not_aligned() -> None:
+    with pytest.raises(ResolveError):
+        assert resolve(FloatType(), StringType())
+
+
+def test_string_not_aligned() -> None:
+    with pytest.raises(ResolveError):
+        assert resolve(StringType(), FloatType())
+
+
+def test_binary_not_aligned() -> None:
+    with pytest.raises(ResolveError):
+        assert resolve(BinaryType(), FloatType())
+
+
+def test_decimal_not_aligned() -> None:
+    with pytest.raises(ResolveError):
+        assert resolve(DecimalType(22, 19), StringType())
+
+
+def test_resolve_decimal_to_decimal_reduce_precision() -> None:
+    # DecimalType(P, S) to DecimalType(P2, S) where P2 < P is not allowed
+    with pytest.raises(ResolveError) as exc_info:
+        _ = resolve(DecimalType(19, 25), DecimalType(10, 25)) == DecimalReader(22, 25)
+
+    assert "Cannot reduce precision from decimal(19, 25) to decimal(10, 25)" in str(exc_info.value)
+
+
+def test_column_assignment() -> None:
+    int_schema = {
+        "type": "record",
+        "name": "ints",
+        "fields": [
+            {"name": "a", "type": "int", "field-id": 1},
+            {"name": "b", "type": "int", "field-id": 2},
+            {"name": "c", "type": "int", "field-id": 3},
+        ],
+    }
+
+    from fastavro import parse_schema, writer
+
+    parsed_schema = parse_schema(int_schema)
+
+    int_records = [
+        {
+            "a": 1,
+            "b": 2,
+            "c": 3,
+        }
+    ]
+
+    with TemporaryDirectory() as tmpdir:
+        tmp_avro_file = tmpdir + "/manifest.avro"
+        with open(tmp_avro_file, "wb") as out:
+            writer(out, parsed_schema, int_records)
+
+        class Ints(Record):
+            c: int = Field()
+            d: Optional[int] = Field()
+
+        MANIFEST_ENTRY_SCHEMA = Schema(
+            NestedField(3, "c", IntegerType(), required=True),
+            NestedField(4, "d", IntegerType(), required=False),
+        )
+
+        with AvroFile[Ints](PyArrowFileIO().new_input(tmp_avro_file), MANIFEST_ENTRY_SCHEMA, {-1: Ints}) as reader:
+            records = list(reader)
+
+    assert repr(records) == "[Ints[c=3, d=None]]"
+
+
+def test_resolver_initial_value() -> None:
+    write_schema = Schema(
+        NestedField(1, "name", StringType()),
+        schema_id=1,
+    )
+    read_schema = Schema(
+        NestedField(2, "something", StringType(), required=False, initial_default="vo"),
+        schema_id=2,
+    )
+
+    assert resolve(write_schema, read_schema) == StructReader(
+        (
+            (None, StringReader()),  # The one we skip
+            (0, DefaultReader("vo")),
+        ),
+        Record,
+        read_schema.as_struct(),
+    )
diff --git a/tests/avro/test_writer.py b/tests/avro/test_writer.py
new file mode 100644
index 0000000000..991d9d1ae7
--- /dev/null
+++ b/tests/avro/test_writer.py
@@ -0,0 +1,237 @@
+# Licensed to the Apache Software Foundation (ASF)
under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint:disable=protected-access + +import io +import struct +from typing import Dict, List + +import pytest +from _decimal import Decimal + +from pyiceberg.avro.encoder import BinaryEncoder +from pyiceberg.avro.resolver import construct_writer +from pyiceberg.avro.writer import ( + BinaryWriter, + BooleanWriter, + DateWriter, + DecimalWriter, + DoubleWriter, + FixedWriter, + FloatWriter, + IntegerWriter, + StringWriter, + TimestamptzWriter, + TimestampWriter, + TimeWriter, + UUIDWriter, +) +from pyiceberg.typedef import Record +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + PrimitiveType, + StringType, + StructType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) + + +def zigzag_encode(datum: int) -> bytes: + result = [] + datum = (datum << 1) ^ (datum >> 63) + while (datum & ~0x7F) != 0: + result.append(struct.pack("B", (datum & 0x7F) | 0x80)) + datum >>= 7 + result.append(struct.pack("B", datum)) + return b"".join(result) + + +def test_fixed_writer() -> None: + assert construct_writer(FixedType(22)) == FixedWriter(22) + + +def test_decimal_writer() -> None: + assert construct_writer(DecimalType(19, 25)) == DecimalWriter(19, 25) + + +def test_boolean_writer() -> None: + assert construct_writer(BooleanType()) == BooleanWriter() + + +def test_integer_writer() -> None: + assert construct_writer(IntegerType()) == IntegerWriter() + + +def test_long_writer() -> None: + assert construct_writer(LongType()) == IntegerWriter() + + +def test_float_writer() -> None: + assert construct_writer(FloatType()) == FloatWriter() + + +def test_double_writer() -> None: + assert construct_writer(DoubleType()) == DoubleWriter() + + +def test_date_writer() -> None: + assert construct_writer(DateType()) == DateWriter() + + +def test_time_writer() -> None: + assert construct_writer(TimeType()) == TimeWriter() + + +def test_timestamp_writer() -> None: + assert construct_writer(TimestampType()) == TimestampWriter() + + +def test_timestamptz_writer() -> None: + assert construct_writer(TimestamptzType()) == TimestamptzWriter() + + +def test_string_writer() -> None: + assert construct_writer(StringType()) == StringWriter() + + +def test_binary_writer() -> None: + assert construct_writer(BinaryType()) == BinaryWriter() + + +def test_unknown_type() -> None: + class UnknownType(PrimitiveType): + root: str = "UnknownType" + + with pytest.raises(ValueError) as exc_info: + construct_writer(UnknownType()) + + assert "Unknown type:" in str(exc_info.value) + + +def test_uuid_writer() -> None: + assert construct_writer(UUIDType()) == UUIDWriter() + + +def test_write_simple_struct() -> None: + output = io.BytesIO() + encoder = 
BinaryEncoder(output) + + schema = StructType( + NestedField(1, "id", IntegerType(), required=True), NestedField(2, "property", StringType(), required=True) + ) + + class MyStruct(Record): + id: int + property: str + + my_struct = MyStruct(id=12, property="awesome") + + enc_str = b"awesome" + + construct_writer(schema).write(encoder, my_struct) + + assert output.getbuffer() == b"".join([b"\x18", zigzag_encode(len(enc_str)), enc_str]) + + +def test_write_struct_with_dict() -> None: + output = io.BytesIO() + encoder = BinaryEncoder(output) + + schema = StructType( + NestedField(1, "id", IntegerType(), required=True), + NestedField(2, "properties", MapType(3, IntegerType(), 4, IntegerType()), required=True), + ) + + class MyStruct(Record): + id: int + properties: Dict[int, int] + + my_struct = MyStruct(id=12, properties={1: 2, 3: 4}) + + construct_writer(schema).write(encoder, my_struct) + + assert output.getbuffer() == b"".join( + [ + b"\x18", + zigzag_encode(len(my_struct.properties)), + zigzag_encode(1), + zigzag_encode(2), + zigzag_encode(3), + zigzag_encode(4), + b"\x00", + ] + ) + + +def test_write_struct_with_list() -> None: + output = io.BytesIO() + encoder = BinaryEncoder(output) + + schema = StructType( + NestedField(1, "id", IntegerType(), required=True), + NestedField(2, "properties", ListType(3, IntegerType()), required=True), + ) + + class MyStruct(Record): + id: int + properties: List[int] + + my_struct = MyStruct(id=12, properties=[1, 2, 3, 4]) + + construct_writer(schema).write(encoder, my_struct) + + assert output.getbuffer() == b"".join( + [ + b"\x18", + zigzag_encode(len(my_struct.properties)), + zigzag_encode(1), + zigzag_encode(2), + zigzag_encode(3), + zigzag_encode(4), + b"\x00", + ] + ) + + +def test_write_decimal() -> None: + output = io.BytesIO() + encoder = BinaryEncoder(output) + + schema = StructType( + NestedField(1, "decimal", DecimalType(10, 2), required=True), + ) + + class MyStruct(Record): + decimal: Decimal + + construct_writer(schema).write(encoder, MyStruct(Decimal("1000.12"))) + + assert output.getvalue() == b"\x00\x00\x01\x86\xac" diff --git a/tests/catalog/integration_test_dynamodb.py b/tests/catalog/integration_test_dynamodb.py new file mode 100644 index 0000000000..5ca8767d6d --- /dev/null +++ b/tests/catalog/integration_test_dynamodb.py @@ -0,0 +1,255 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
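+# These tests exercise a real DynamoDB catalog and S3 bucket: they assume AWS
+# credentials and the test bucket from tests/conftest.py are available in the
+# environment, and they are run by the integration workflow rather than the
+# regular unit test suite.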
+ +from typing import Generator, List + +import boto3 +import pytest +from botocore.exceptions import ClientError + +from pyiceberg.catalog import Catalog +from pyiceberg.catalog.dynamodb import DynamoDbCatalog +from pyiceberg.exceptions import ( + NamespaceAlreadyExistsError, + NamespaceNotEmptyError, + NoSuchNamespaceError, + NoSuchTableError, + TableAlreadyExistsError, +) +from pyiceberg.schema import Schema +from tests.conftest import clean_up, get_bucket_name, get_s3_path + +# The number of tables/databases used in list_table/namespace test +LIST_TEST_NUMBER = 2 + + +@pytest.fixture(name="dynamodb", scope="module") +def fixture_dynamodb_client() -> boto3.client: + yield boto3.client("dynamodb") + + +@pytest.fixture(name="test_catalog", scope="module") +def fixture_test_catalog() -> Generator[Catalog, None, None]: + """Configure the pre- and post-setting of aws integration test.""" + test_catalog = DynamoDbCatalog("test_ddb_catalog", warehouse=get_s3_path(get_bucket_name())) + yield test_catalog + clean_up(test_catalog) + + +def test_create_table( + test_catalog: Catalog, s3: boto3.client, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog.create_namespace(database_name) + test_catalog.create_table(identifier, table_schema_nested, get_s3_path(get_bucket_name(), database_name, table_name)) + table = test_catalog.load_table(identifier) + assert table.identifier == (test_catalog.name,) + identifier + metadata_location = table.metadata_location.split(get_bucket_name())[1][1:] + s3.head_object(Bucket=get_bucket_name(), Key=metadata_location) + + +def test_create_table_with_invalid_location(table_schema_nested: Schema, database_name: str, table_name: str) -> None: + identifier = (database_name, table_name) + test_catalog_no_warehouse = DynamoDbCatalog("test_ddb_catalog") + test_catalog_no_warehouse.create_namespace(database_name) + with pytest.raises(ValueError): + test_catalog_no_warehouse.create_table(identifier, table_schema_nested) + test_catalog_no_warehouse.drop_namespace(database_name) + + +def test_create_table_with_default_location( + test_catalog: Catalog, s3: boto3.client, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog.create_namespace(database_name) + test_catalog.create_table(identifier, table_schema_nested) + table = test_catalog.load_table(identifier) + assert table.identifier == (test_catalog.name,) + identifier + metadata_location = table.metadata_location.split(get_bucket_name())[1][1:] + s3.head_object(Bucket=get_bucket_name(), Key=metadata_location) + + +def test_create_table_with_invalid_database(test_catalog: Catalog, table_schema_nested: Schema, table_name: str) -> None: + identifier = ("invalid", table_name) + with pytest.raises(NoSuchNamespaceError): + test_catalog.create_table(identifier, table_schema_nested) + + +def test_create_duplicated_table(test_catalog: Catalog, table_schema_nested: Schema, database_name: str, table_name: str) -> None: + test_catalog.create_namespace(database_name) + test_catalog.create_table((database_name, table_name), table_schema_nested) + with pytest.raises(TableAlreadyExistsError): + test_catalog.create_table((database_name, table_name), table_schema_nested) + + +def test_load_table(test_catalog: Catalog, table_schema_nested: Schema, database_name: str, table_name: str) -> None: + identifier = (database_name, table_name) + test_catalog.create_namespace(database_name) 
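+    # create_table and load_table should agree on the identifier, the metadata
+    # location, and the metadata itself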
+ table = test_catalog.create_table(identifier, table_schema_nested) + loaded_table = test_catalog.load_table(identifier) + assert table.identifier == loaded_table.identifier + assert table.metadata_location == loaded_table.metadata_location + assert table.metadata == loaded_table.metadata + + +def test_list_tables(test_catalog: Catalog, table_schema_nested: Schema, database_name: str, table_list: List[str]) -> None: + test_catalog.create_namespace(database_name) + for table_name in table_list: + test_catalog.create_table((database_name, table_name), table_schema_nested) + identifier_list = test_catalog.list_tables(database_name) + assert len(identifier_list) == LIST_TEST_NUMBER + for table_name in table_list: + assert (database_name, table_name) in identifier_list + + +def test_rename_table( + test_catalog: Catalog, s3: boto3.client, table_schema_nested: Schema, table_name: str, database_name: str +) -> None: + new_database_name = f"{database_name}_new" + test_catalog.create_namespace(database_name) + test_catalog.create_namespace(new_database_name) + new_table_name = f"rename-{table_name}" + identifier = (database_name, table_name) + table = test_catalog.create_table(identifier, table_schema_nested) + assert table.identifier == (test_catalog.name,) + identifier + new_identifier = (new_database_name, new_table_name) + test_catalog.rename_table(identifier, new_identifier) + new_table = test_catalog.load_table(new_identifier) + assert new_table.identifier == (test_catalog.name,) + new_identifier + assert new_table.metadata_location == table.metadata_location + metadata_location = new_table.metadata_location.split(get_bucket_name())[1][1:] + s3.head_object(Bucket=get_bucket_name(), Key=metadata_location) + with pytest.raises(NoSuchTableError): + test_catalog.load_table(identifier) + + +def test_drop_table(test_catalog: Catalog, table_schema_nested: Schema, table_name: str, database_name: str) -> None: + identifier = (database_name, table_name) + test_catalog.create_namespace(database_name) + table = test_catalog.create_table(identifier, table_schema_nested) + assert table.identifier == (test_catalog.name,) + identifier + test_catalog.drop_table(identifier) + with pytest.raises(NoSuchTableError): + test_catalog.load_table(identifier) + + +def test_purge_table( + test_catalog: Catalog, s3: boto3.client, table_schema_nested: Schema, table_name: str, database_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog.create_namespace(database_name) + test_catalog.create_table(identifier, table_schema_nested) + table = test_catalog.load_table(identifier) + assert table.identifier == (test_catalog.name,) + identifier + metadata_location = table.metadata_location.split(get_bucket_name())[1][1:] + s3.head_object(Bucket=get_bucket_name(), Key=metadata_location) + test_catalog.purge_table(identifier) + with pytest.raises(NoSuchTableError): + test_catalog.load_table(identifier) + with pytest.raises(ClientError): + s3.head_object(Bucket=get_bucket_name(), Key=metadata_location) + + +def test_create_namespace(test_catalog: Catalog, database_name: str) -> None: + test_catalog.create_namespace(database_name) + assert (database_name,) in test_catalog.list_namespaces() + + +def test_create_duplicate_namespace(test_catalog: Catalog, database_name: str) -> None: + test_catalog.create_namespace(database_name) + with pytest.raises(NamespaceAlreadyExistsError): + test_catalog.create_namespace(database_name) + + +def test_create_namespace_with_comment_and_location(test_catalog: Catalog, 
database_name: str) -> None: + test_location = get_s3_path(get_bucket_name(), database_name) + test_properties = { + "comment": "this is a test description", + "location": test_location, + } + test_catalog.create_namespace(namespace=database_name, properties=test_properties) + loaded_database_list = test_catalog.list_namespaces() + assert (database_name,) in loaded_database_list + properties = test_catalog.load_namespace_properties(database_name) + assert properties["comment"] == "this is a test description" + assert properties["location"] == test_location + + +def test_list_namespaces(test_catalog: Catalog, database_list: List[str]) -> None: + for database_name in database_list: + test_catalog.create_namespace(database_name) + db_list = test_catalog.list_namespaces() + for database_name in database_list: + assert (database_name,) in db_list + assert len(test_catalog.list_namespaces(list(database_list)[0])) == 0 + + +def test_drop_namespace(test_catalog: Catalog, table_schema_nested: Schema, table_name: str, database_name: str) -> None: + test_catalog.create_namespace(database_name) + assert (database_name,) in test_catalog.list_namespaces() + test_catalog.create_table((database_name, table_name), table_schema_nested) + with pytest.raises(NamespaceNotEmptyError): + test_catalog.drop_namespace(database_name) + test_catalog.drop_table((database_name, table_name)) + test_catalog.drop_namespace(database_name) + assert (database_name,) not in test_catalog.list_namespaces() + + +def test_load_namespace_properties(test_catalog: Catalog, database_name: str) -> None: + warehouse_location = get_s3_path(get_bucket_name()) + test_properties = { + "comment": "this is a test description", + "location": f"{warehouse_location}/{database_name}.db", + "test_property1": "1", + "test_property2": "2", + "test_property3": "3", + } + + test_catalog.create_namespace(database_name, test_properties) + listed_properties = test_catalog.load_namespace_properties(database_name) + for k, v in listed_properties.items(): + assert k in test_properties + assert v == test_properties[k] + + +def test_load_empty_namespace_properties(test_catalog: Catalog, database_name: str) -> None: + test_catalog.create_namespace(database_name) + listed_properties = test_catalog.load_namespace_properties(database_name) + assert listed_properties == {} + + +def test_update_namespace_properties(test_catalog: Catalog, database_name: str) -> None: + warehouse_location = get_s3_path(get_bucket_name()) + test_properties = { + "comment": "this is a test description", + "location": f"{warehouse_location}/{database_name}.db", + "test_property1": "1", + "test_property2": "2", + "test_property3": "3", + } + removals = {"test_property1", "test_property2", "test_property3", "should_not_removed"} + updates = {"test_property4": "4", "test_property5": "5", "comment": "updated test description"} + test_catalog.create_namespace(database_name, test_properties) + update_report = test_catalog.update_namespace_properties(database_name, removals, updates) + for k in updates.keys(): + assert k in update_report.updated + for k in removals: + if k == "should_not_removed": + assert k in update_report.missing + else: + assert k in update_report.removed + assert "updated test description" == test_catalog.load_namespace_properties(database_name)["comment"] diff --git a/tests/catalog/integration_test_glue.py b/tests/catalog/integration_test_glue.py new file mode 100644 index 0000000000..2689ef14d3 --- /dev/null +++ b/tests/catalog/integration_test_glue.py @@ -0,0 +1,263 
@@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import Generator, List + +import boto3 +import pytest +from botocore.exceptions import ClientError + +from pyiceberg.catalog import Catalog +from pyiceberg.catalog.glue import GlueCatalog +from pyiceberg.exceptions import ( + NamespaceAlreadyExistsError, + NamespaceNotEmptyError, + NoSuchNamespaceError, + NoSuchTableError, + TableAlreadyExistsError, +) +from pyiceberg.schema import Schema +from tests.conftest import clean_up, get_bucket_name, get_s3_path + +# The number of tables/databases used in the list_tables/list_namespaces tests +LIST_TEST_NUMBER = 2 +CATALOG_NAME = "glue" + + +@pytest.fixture(name="glue", scope="module") +def fixture_glue_client() -> Generator[boto3.client, None, None]: + yield boto3.client("glue") + + +@pytest.fixture(name="test_catalog", scope="module") +def fixture_test_catalog() -> Generator[Catalog, None, None]: + """Set up the Glue catalog for the AWS integration tests and clean it up afterwards.""" + test_catalog = GlueCatalog(CATALOG_NAME, warehouse=get_s3_path(get_bucket_name())) + yield test_catalog + clean_up(test_catalog) + + +def test_create_table( + test_catalog: Catalog, s3: boto3.client, table_schema_nested: Schema, table_name: str, database_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog.create_namespace(database_name) + test_catalog.create_table(identifier, table_schema_nested, get_s3_path(get_bucket_name(), database_name, table_name)) + table = test_catalog.load_table(identifier) + assert table.identifier == (CATALOG_NAME,) + identifier + metadata_location = table.metadata_location.split(get_bucket_name())[1][1:] + s3.head_object(Bucket=get_bucket_name(), Key=metadata_location) + + +def test_create_table_with_invalid_location(table_schema_nested: Schema, table_name: str, database_name: str) -> None: + identifier = (database_name, table_name) + test_catalog_no_warehouse = GlueCatalog("glue") + test_catalog_no_warehouse.create_namespace(database_name) + with pytest.raises(ValueError): + test_catalog_no_warehouse.create_table(identifier, table_schema_nested) + test_catalog_no_warehouse.drop_namespace(database_name) + + +def test_create_table_with_default_location( + test_catalog: Catalog, s3: boto3.client, table_schema_nested: Schema, table_name: str, database_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog.create_namespace(database_name) + test_catalog.create_table(identifier, table_schema_nested) + table = test_catalog.load_table(identifier) + assert table.identifier == (CATALOG_NAME,) + identifier + metadata_location = table.metadata_location.split(get_bucket_name())[1][1:] + s3.head_object(Bucket=get_bucket_name(), Key=metadata_location) + + +def test_create_table_with_invalid_database(test_catalog: Catalog, table_schema_nested: Schema,
table_name: str) -> None: + identifier = ("invalid", table_name) + with pytest.raises(NoSuchNamespaceError): + test_catalog.create_table(identifier, table_schema_nested) + + +def test_create_duplicated_table(test_catalog: Catalog, table_schema_nested: Schema, table_name: str, database_name: str) -> None: + test_catalog.create_namespace(database_name) + test_catalog.create_table((database_name, table_name), table_schema_nested) + with pytest.raises(TableAlreadyExistsError): + test_catalog.create_table((database_name, table_name), table_schema_nested) + + +def test_load_table(test_catalog: Catalog, table_schema_nested: Schema, table_name: str, database_name: str) -> None: + identifier = (database_name, table_name) + test_catalog.create_namespace(database_name) + table = test_catalog.create_table(identifier, table_schema_nested) + loaded_table = test_catalog.load_table(identifier) + assert table.identifier == loaded_table.identifier + assert table.metadata_location == loaded_table.metadata_location + assert table.metadata == loaded_table.metadata + + +def test_list_tables(test_catalog: Catalog, table_schema_nested: Schema, database_name: str, table_list: List[str]) -> None: + test_catalog.create_namespace(database_name) + for table_name in table_list: + test_catalog.create_table((database_name, table_name), table_schema_nested) + identifier_list = test_catalog.list_tables(database_name) + assert len(identifier_list) == LIST_TEST_NUMBER + for table_name in table_list: + assert (database_name, table_name) in identifier_list + + +def test_rename_table( + test_catalog: Catalog, s3: boto3.client, table_schema_nested: Schema, table_name: str, database_name: str +) -> None: + new_database_name = f"{database_name}_new" + test_catalog.create_namespace(database_name) + test_catalog.create_namespace(new_database_name) + new_table_name = f"rename-{table_name}" + identifier = (database_name, table_name) + table = test_catalog.create_table(identifier, table_schema_nested) + assert table.identifier == (CATALOG_NAME,) + identifier + new_identifier = (new_database_name, new_table_name) + test_catalog.rename_table(identifier, new_identifier) + new_table = test_catalog.load_table(new_identifier) + assert new_table.identifier == (CATALOG_NAME,) + new_identifier + assert new_table.metadata_location == table.metadata_location + metadata_location = new_table.metadata_location.split(get_bucket_name())[1][1:] + s3.head_object(Bucket=get_bucket_name(), Key=metadata_location) + with pytest.raises(NoSuchTableError): + test_catalog.load_table(identifier) + + +def test_drop_table(test_catalog: Catalog, table_schema_nested: Schema, table_name: str, database_name: str) -> None: + identifier = (database_name, table_name) + test_catalog.create_namespace(database_name) + table = test_catalog.create_table(identifier, table_schema_nested) + assert table.identifier == (CATALOG_NAME,) + identifier + test_catalog.drop_table(identifier) + with pytest.raises(NoSuchTableError): + test_catalog.load_table(identifier) + + +def test_purge_table( + test_catalog: Catalog, s3: boto3.client, table_schema_nested: Schema, table_name: str, database_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog.create_namespace(database_name) + test_catalog.create_table(identifier, table_schema_nested) + table = test_catalog.load_table(identifier) + assert table.identifier == (CATALOG_NAME,) + identifier + metadata_location = table.metadata_location.split(get_bucket_name())[1][1:] + s3.head_object(Bucket=get_bucket_name(), 
Key=metadata_location) + test_catalog.purge_table(identifier) + with pytest.raises(NoSuchTableError): + test_catalog.load_table(identifier) + with pytest.raises(ClientError): + s3.head_object(Bucket=get_bucket_name(), Key=metadata_location) + + +def test_create_namespace(test_catalog: Catalog, database_name: str) -> None: + test_catalog.create_namespace(database_name) + assert (database_name,) in test_catalog.list_namespaces() + + +def test_create_duplicate_namespace(test_catalog: Catalog, database_name: str) -> None: + test_catalog.create_namespace(database_name) + with pytest.raises(NamespaceAlreadyExistsError): + test_catalog.create_namespace(database_name) + + +def test_create_namespace_with_comment_and_location(test_catalog: Catalog, database_name: str) -> None: + test_location = get_s3_path(get_bucket_name(), database_name) + test_properties = { + "comment": "this is a test description", + "location": test_location, + } + test_catalog.create_namespace(namespace=database_name, properties=test_properties) + loaded_database_list = test_catalog.list_namespaces() + assert (database_name,) in loaded_database_list + properties = test_catalog.load_namespace_properties(database_name) + assert properties["comment"] == "this is a test description" + assert properties["location"] == test_location + + +def test_list_namespaces(test_catalog: Catalog, database_list: List[str]) -> None: + for database_name in database_list: + test_catalog.create_namespace(database_name) + db_list = test_catalog.list_namespaces() + for database_name in database_list: + assert (database_name,) in db_list + assert len(test_catalog.list_namespaces(list(database_list)[0])) == 0 + + +def test_drop_namespace(test_catalog: Catalog, table_schema_nested: Schema, database_name: str, table_name: str) -> None: + test_catalog.create_namespace(database_name) + assert (database_name,) in test_catalog.list_namespaces() + test_catalog.create_table((database_name, table_name), table_schema_nested) + with pytest.raises(NamespaceNotEmptyError): + test_catalog.drop_namespace(database_name) + test_catalog.drop_table((database_name, table_name)) + test_catalog.drop_namespace(database_name) + assert (database_name,) not in test_catalog.list_namespaces() + + +def test_load_namespace_properties(test_catalog: Catalog, database_name: str) -> None: + warehouse_location = get_s3_path(get_bucket_name()) + test_properties = { + "comment": "this is a test description", + "location": f"{warehouse_location}/{database_name}.db", + "test_property1": "1", + "test_property2": "2", + "test_property3": "3", + } + + test_catalog.create_namespace(database_name, test_properties) + listed_properties = test_catalog.load_namespace_properties(database_name) + for k, v in listed_properties.items(): + assert k in test_properties + assert v == test_properties[k] + + +def test_load_empty_namespace_properties(test_catalog: Catalog, database_name: str) -> None: + test_catalog.create_namespace(database_name) + listed_properties = test_catalog.load_namespace_properties(database_name) + assert listed_properties == {} + + +def test_load_default_namespace_properties(test_catalog: Catalog, glue: boto3.client, database_name: str) -> None: + # simulate creating database with default settings through AWS Glue Web Console + glue.create_database(DatabaseInput={"Name": database_name}) + listed_properties = test_catalog.load_namespace_properties(database_name) + assert listed_properties == {} + + +def test_update_namespace_properties(test_catalog: Catalog, database_name: str) -> 
None: + warehouse_location = get_s3_path(get_bucket_name()) + test_properties = { + "comment": "this is a test description", + "location": f"{warehouse_location}/{database_name}.db", + "test_property1": "1", + "test_property2": "2", + "test_property3": "3", + } + removals = {"test_property1", "test_property2", "test_property3", "should_not_removed"} + updates = {"test_property4": "4", "test_property5": "5", "comment": "updated test description"} + test_catalog.create_namespace(database_name, test_properties) + update_report = test_catalog.update_namespace_properties(database_name, removals, updates) + for k in updates.keys(): + assert k in update_report.updated + for k in removals: + if k == "should_not_removed": + assert k in update_report.missing + else: + assert k in update_report.removed + assert "updated test description" == test_catalog.load_namespace_properties(database_name)["comment"] diff --git a/tests/catalog/test_base.py b/tests/catalog/test_base.py new file mode 100644 index 0000000000..da121f6114 --- /dev/null +++ b/tests/catalog/test_base.py @@ -0,0 +1,605 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
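+# Note: the tests in this module exercise the abstract Catalog interface
+# against the InMemoryCatalog test double defined below. It keeps tables and
+# namespaces in plain dicts and fabricates metadata locations under
+# s3://warehouse/, so no external services are required.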
+# pylint:disable=redefined-outer-name + +from typing import ( + Dict, + List, + Optional, + Set, + Union, +) + +import pytest + +from pyiceberg.catalog import ( + Catalog, + Identifier, + Properties, + PropertiesUpdateSummary, +) +from pyiceberg.exceptions import ( + NamespaceAlreadyExistsError, + NamespaceNotEmptyError, + NoSuchNamespaceError, + NoSuchTableError, + TableAlreadyExistsError, +) +from pyiceberg.io import load_file_io +from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionField, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.table import ( + AddSchemaUpdate, + CommitTableRequest, + CommitTableResponse, + SetCurrentSchemaUpdate, + Table, +) +from pyiceberg.table.metadata import TableMetadata, TableMetadataV1, new_table_metadata +from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder +from pyiceberg.transforms import IdentityTransform +from pyiceberg.typedef import EMPTY_DICT +from pyiceberg.types import IntegerType, LongType, NestedField + + +class InMemoryCatalog(Catalog): + """An in-memory catalog implementation for testing purposes.""" + + __tables: Dict[Identifier, Table] + __namespaces: Dict[Identifier, Properties] + + def __init__(self, name: str, **properties: str) -> None: + super().__init__(name, **properties) + self.__tables = {} + self.__namespaces = {} + + def create_table( + self, + identifier: Union[str, Identifier], + schema: Schema, + location: Optional[str] = None, + partition_spec: PartitionSpec = UNPARTITIONED_PARTITION_SPEC, + sort_order: SortOrder = UNSORTED_SORT_ORDER, + properties: Properties = EMPTY_DICT, + ) -> Table: + identifier = Catalog.identifier_to_tuple(identifier) + namespace = Catalog.namespace_from(identifier) + + if identifier in self.__tables: + raise TableAlreadyExistsError(f"Table already exists: {identifier}") + else: + if namespace not in self.__namespaces: + self.__namespaces[namespace] = {} + + new_location = location or f's3://warehouse/{"/".join(identifier)}/data' + metadata = TableMetadataV1( + **{ + "format-version": 1, + "table-uuid": "d20125c8-7284-442c-9aea-15fee620737c", + "location": new_location, + "last-updated-ms": 1602638573874, + "last-column-id": schema.highest_field_id, + "schema": schema.model_dump(), + "partition-spec": partition_spec.model_dump()["fields"], + "properties": properties, + "current-snapshot-id": -1, + "snapshots": [{"snapshot-id": 1925, "timestamp-ms": 1602638573822}], + } + ) + table = Table( + identifier=identifier, + metadata=metadata, + metadata_location=f's3://warehouse/{"/".join(identifier)}/metadata/metadata.json', + io=load_file_io(), + catalog=self, + ) + self.__tables[identifier] = table + return table + + def register_table(self, identifier: Union[str, Identifier], metadata_location: str) -> Table: + raise NotImplementedError + + def _commit_table(self, table_request: CommitTableRequest) -> CommitTableResponse: + new_metadata: Optional[TableMetadata] = None + metadata_location = "" + for update in table_request.updates: + if isinstance(update, AddSchemaUpdate): + add_schema_update: AddSchemaUpdate = update + identifier = Catalog.identifier_to_tuple(table_request.identifier) + table = self.__tables[("com", *identifier)] + new_metadata = new_table_metadata( + add_schema_update.schema_, + table.metadata.partition_specs[0], + table.sort_order(), + table.location(), + table.properties, + table.metadata.table_uuid, + ) + + table = Table( + identifier=identifier, + metadata=new_metadata, + 
metadata_location=f's3://warehouse/{"/".join(identifier)}/metadata/metadata.json', + io=load_file_io(), + catalog=self, + ) + + self.__tables[identifier] = table + metadata_location = f's3://warehouse/{"/".join(identifier)}/metadata/metadata.json' + + return CommitTableResponse( + metadata=new_metadata.model_dump() if new_metadata else {}, + metadata_location=metadata_location if metadata_location else "", + ) + + def load_table(self, identifier: Union[str, Identifier]) -> Table: + identifier = Catalog.identifier_to_tuple(identifier) + try: + return self.__tables[identifier] + except KeyError as error: + raise NoSuchTableError(f"Table does not exist: {identifier}") from error + + def drop_table(self, identifier: Union[str, Identifier]) -> None: + identifier = Catalog.identifier_to_tuple(identifier) + try: + self.__tables.pop(identifier) + except KeyError as error: + raise NoSuchTableError(f"Table does not exist: {identifier}") from error + + def purge_table(self, identifier: Union[str, Identifier]) -> None: + self.drop_table(identifier) + + def rename_table(self, from_identifier: Union[str, Identifier], to_identifier: Union[str, Identifier]) -> Table: + from_identifier = Catalog.identifier_to_tuple(from_identifier) + try: + table = self.__tables.pop(from_identifier) + except KeyError as error: + raise NoSuchTableError(f"Table does not exist: {from_identifier}") from error + + to_identifier = Catalog.identifier_to_tuple(to_identifier) + to_namespace = Catalog.namespace_from(to_identifier) + if to_namespace not in self.__namespaces: + self.__namespaces[to_namespace] = {} + + self.__tables[to_identifier] = Table( + identifier=to_identifier, + metadata=table.metadata, + metadata_location=table.metadata_location, + io=load_file_io(), + catalog=self, + ) + return self.__tables[to_identifier] + + def create_namespace(self, namespace: Union[str, Identifier], properties: Properties = EMPTY_DICT) -> None: + namespace = Catalog.identifier_to_tuple(namespace) + if namespace in self.__namespaces: + raise NamespaceAlreadyExistsError(f"Namespace already exists: {namespace}") + else: + self.__namespaces[namespace] = properties if properties else {} + + def drop_namespace(self, namespace: Union[str, Identifier]) -> None: + namespace = Catalog.identifier_to_tuple(namespace) + if [table_identifier for table_identifier in self.__tables.keys() if namespace == table_identifier[:-1]]: + raise NamespaceNotEmptyError(f"Namespace is not empty: {namespace}") + try: + self.__namespaces.pop(namespace) + except KeyError as error: + raise NoSuchNamespaceError(f"Namespace does not exist: {namespace}") from error + + def list_tables(self, namespace: Optional[Union[str, Identifier]] = None) -> List[Identifier]: + if namespace: + namespace = Catalog.identifier_to_tuple(namespace) + list_tables = [table_identifier for table_identifier in self.__tables.keys() if namespace == table_identifier[:-1]] + else: + list_tables = list(self.__tables.keys()) + + return list_tables + + def list_namespaces(self, namespace: Union[str, Identifier] = ()) -> List[Identifier]: + # Hierarchical namespace is not supported. 
Return an empty list. + if namespace: + return [] + + return list(self.__namespaces.keys()) + + def load_namespace_properties(self, namespace: Union[str, Identifier]) -> Properties: + namespace = Catalog.identifier_to_tuple(namespace) + try: + return self.__namespaces[namespace] + except KeyError as error: + raise NoSuchNamespaceError(f"Namespace does not exist: {namespace}") from error + + def update_namespace_properties( + self, namespace: Union[str, Identifier], removals: Optional[Set[str]] = None, updates: Properties = EMPTY_DICT + ) -> PropertiesUpdateSummary: + removed: Set[str] = set() + updated: Set[str] = set() + + namespace = Catalog.identifier_to_tuple(namespace) + if namespace in self.__namespaces: + if removals: + for key in removals: + if key in self.__namespaces[namespace]: + del self.__namespaces[namespace][key] + removed.add(key) + if updates: + for key, value in updates.items(): + self.__namespaces[namespace][key] = value + updated.add(key) + else: + raise NoSuchNamespaceError(f"Namespace does not exist: {namespace}") + + # Removals that were requested but not present are reported back as missing. + expected_to_change = (removals or set()).difference(removed) + + return PropertiesUpdateSummary( + removed=list(removed or []), updated=list(updates.keys() if updates else []), missing=list(expected_to_change) + ) + + +@pytest.fixture +def catalog() -> InMemoryCatalog: + return InMemoryCatalog("test.in.memory.catalog", **{"test.key": "test.value"}) + + +TEST_TABLE_IDENTIFIER = ("com", "organization", "department", "my_table") +TEST_TABLE_NAMESPACE = ("com", "organization", "department") +TEST_TABLE_NAME = "my_table" +TEST_TABLE_SCHEMA = Schema( + NestedField(1, "x", LongType()), + NestedField(2, "y", LongType(), doc="comment"), + NestedField(3, "z", LongType()), +) +TEST_TABLE_LOCATION = "protocol://some/location" +TEST_TABLE_PARTITION_SPEC = PartitionSpec(PartitionField(name="x", transform=IdentityTransform(), source_id=1, field_id=1000)) +TEST_TABLE_PROPERTIES = {"key1": "value1", "key2": "value2"} +NO_SUCH_TABLE_ERROR = "Table does not exist: \\('com', 'organization', 'department', 'my_table'\\)" +TABLE_ALREADY_EXISTS_ERROR = "Table already exists: \\('com', 'organization', 'department', 'my_table'\\)" +NAMESPACE_ALREADY_EXISTS_ERROR = "Namespace already exists: \\('com', 'organization', 'department'\\)" +NO_SUCH_NAMESPACE_ERROR = "Namespace does not exist: \\('com', 'organization', 'department'\\)" +NAMESPACE_NOT_EMPTY_ERROR = "Namespace is not empty: \\('com', 'organization', 'department'\\)" + + +def given_catalog_has_a_table(catalog: InMemoryCatalog) -> Table: + return catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + + +def test_namespace_from_tuple() -> None: + # Given + identifier = ("com", "organization", "department", "my_table") + # When + namespace_from = Catalog.namespace_from(identifier) + # Then + assert namespace_from == ("com", "organization", "department") + + +def test_namespace_from_str() -> None: + # Given + identifier = "com.organization.department.my_table" + # When + namespace_from = Catalog.namespace_from(identifier) + # Then + assert namespace_from == ("com", "organization", "department") + + +def test_name_from_tuple() -> None: + # Given + identifier = ("com", "organization", "department", "my_table") + # When + name_from = Catalog.table_name_from(identifier) + # Then + assert name_from == "my_table" + + +def test_name_from_str() -> None: + # Given + identifier =
"com.organization.department.my_table" + # When + name_from = Catalog.table_name_from(identifier) + # Then + assert name_from == "my_table" + + +def test_create_table(catalog: InMemoryCatalog) -> None: + table = catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + assert catalog.load_table(TEST_TABLE_IDENTIFIER) == table + + +def test_create_table_raises_error_when_table_already_exists(catalog: InMemoryCatalog) -> None: + # Given + given_catalog_has_a_table(catalog) + # When + with pytest.raises(TableAlreadyExistsError, match=TABLE_ALREADY_EXISTS_ERROR): + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + ) + + +def test_load_table(catalog: InMemoryCatalog) -> None: + # Given + given_table = given_catalog_has_a_table(catalog) + # When + table = catalog.load_table(TEST_TABLE_IDENTIFIER) + # Then + assert table == given_table + + +def test_table_raises_error_on_table_not_found(catalog: InMemoryCatalog) -> None: + with pytest.raises(NoSuchTableError, match=NO_SUCH_TABLE_ERROR): + catalog.load_table(TEST_TABLE_IDENTIFIER) + + +def test_drop_table(catalog: InMemoryCatalog) -> None: + # Given + given_catalog_has_a_table(catalog) + # When + catalog.drop_table(TEST_TABLE_IDENTIFIER) + # Then + with pytest.raises(NoSuchTableError, match=NO_SUCH_TABLE_ERROR): + catalog.load_table(TEST_TABLE_IDENTIFIER) + + +def test_drop_table_that_does_not_exist_raise_error(catalog: InMemoryCatalog) -> None: + with pytest.raises(NoSuchTableError, match=NO_SUCH_TABLE_ERROR): + catalog.load_table(TEST_TABLE_IDENTIFIER) + + +def test_purge_table(catalog: InMemoryCatalog) -> None: + # Given + given_catalog_has_a_table(catalog) + # When + catalog.purge_table(TEST_TABLE_IDENTIFIER) + # Then + with pytest.raises(NoSuchTableError, match=NO_SUCH_TABLE_ERROR): + catalog.load_table(TEST_TABLE_IDENTIFIER) + + +def test_rename_table(catalog: InMemoryCatalog) -> None: + # Given + given_catalog_has_a_table(catalog) + + # When + new_table = "new.namespace.new_table" + table = catalog.rename_table(TEST_TABLE_IDENTIFIER, new_table) + + # Then + assert table.identifier == Catalog.identifier_to_tuple(new_table) + + # And + table = catalog.load_table(new_table) + assert table.identifier == Catalog.identifier_to_tuple(new_table) + + # And + assert ("new", "namespace") in catalog.list_namespaces() + + # And + with pytest.raises(NoSuchTableError, match=NO_SUCH_TABLE_ERROR): + catalog.load_table(TEST_TABLE_IDENTIFIER) + + +def test_create_namespace(catalog: InMemoryCatalog) -> None: + # When + catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES) + + # Then + assert TEST_TABLE_NAMESPACE in catalog.list_namespaces() + assert TEST_TABLE_PROPERTIES == catalog.load_namespace_properties(TEST_TABLE_NAMESPACE) + + +def test_create_namespace_raises_error_on_existing_namespace(catalog: InMemoryCatalog) -> None: + # Given + catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES) + # When + with pytest.raises(NamespaceAlreadyExistsError, match=NAMESPACE_ALREADY_EXISTS_ERROR): + catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES) + + +def test_get_namespace_metadata_raises_error_when_namespace_does_not_exist(catalog: InMemoryCatalog) -> None: + with pytest.raises(NoSuchNamespaceError, match=NO_SUCH_NAMESPACE_ERROR): + catalog.load_namespace_properties(TEST_TABLE_NAMESPACE) + + +def test_list_namespaces(catalog: 
InMemoryCatalog) -> None: + # Given + catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES) + # When + namespaces = catalog.list_namespaces() + # Then + assert TEST_TABLE_NAMESPACE in namespaces + + +def test_drop_namespace(catalog: InMemoryCatalog) -> None: + # Given + catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES) + # When + catalog.drop_namespace(TEST_TABLE_NAMESPACE) + # Then + assert TEST_TABLE_NAMESPACE not in catalog.list_namespaces() + + +def test_drop_namespace_raises_error_when_namespace_does_not_exist(catalog: InMemoryCatalog) -> None: + with pytest.raises(NoSuchNamespaceError, match=NO_SUCH_NAMESPACE_ERROR): + catalog.drop_namespace(TEST_TABLE_NAMESPACE) + + +def test_drop_namespace_raises_error_when_namespace_not_empty(catalog: InMemoryCatalog) -> None: + # Given + given_catalog_has_a_table(catalog) + # When + with pytest.raises(NamespaceNotEmptyError, match=NAMESPACE_NOT_EMPTY_ERROR): + catalog.drop_namespace(TEST_TABLE_NAMESPACE) + + +def test_list_tables(catalog: InMemoryCatalog) -> None: + # Given + given_catalog_has_a_table(catalog) + # When + tables = catalog.list_tables() + # Then + assert tables + assert TEST_TABLE_IDENTIFIER in tables + + +def test_list_tables_under_a_namespace(catalog: InMemoryCatalog) -> None: + # Given + given_catalog_has_a_table(catalog) + new_namespace = ("new", "namespace") + catalog.create_namespace(new_namespace) + # When + all_tables = catalog.list_tables() + new_namespace_tables = catalog.list_tables(new_namespace) + # Then + assert all_tables + assert TEST_TABLE_IDENTIFIER in all_tables + assert new_namespace_tables == [] + + +def test_update_namespace_metadata(catalog: InMemoryCatalog) -> None: + # Given + catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES) + + # When + new_metadata = {"key3": "value3", "key4": "value4"} + summary = catalog.update_namespace_properties(TEST_TABLE_NAMESPACE, updates=new_metadata) + + # Then + assert TEST_TABLE_NAMESPACE in catalog.list_namespaces() + assert new_metadata.items() <= catalog.load_namespace_properties(TEST_TABLE_NAMESPACE).items() + assert summary == PropertiesUpdateSummary(removed=[], updated=["key3", "key4"], missing=[]) + + +def test_update_namespace_metadata_removals(catalog: InMemoryCatalog) -> None: + # Given + catalog.create_namespace(TEST_TABLE_NAMESPACE, TEST_TABLE_PROPERTIES) + + # When + new_metadata = {"key3": "value3", "key4": "value4"} + remove_metadata = {"key1"} + summary = catalog.update_namespace_properties(TEST_TABLE_NAMESPACE, remove_metadata, new_metadata) + + # Then + assert TEST_TABLE_NAMESPACE in catalog.list_namespaces() + assert new_metadata.items() <= catalog.load_namespace_properties(TEST_TABLE_NAMESPACE).items() + assert remove_metadata.isdisjoint(catalog.load_namespace_properties(TEST_TABLE_NAMESPACE).keys()) + assert summary == PropertiesUpdateSummary(removed=["key1"], updated=["key3", "key4"], missing=[]) + + +def test_update_namespace_metadata_raises_error_when_namespace_does_not_exist(catalog: InMemoryCatalog) -> None: + with pytest.raises(NoSuchNamespaceError, match=NO_SUCH_NAMESPACE_ERROR): + catalog.update_namespace_properties(TEST_TABLE_NAMESPACE, updates=TEST_TABLE_PROPERTIES) + + +def test_commit_table(catalog: InMemoryCatalog) -> None: + # Given + given_table = given_catalog_has_a_table(catalog) + new_schema = Schema( + NestedField(1, "x", LongType()), + NestedField(2, "y", LongType(), doc="comment"), + NestedField(3, "z", LongType()), + NestedField(4, "add", LongType()), + ) + + # When + response = 
given_table.catalog._commit_table( # pylint: disable=W0212 + CommitTableRequest( + identifier=given_table.identifier[1:], + updates=[ + AddSchemaUpdate(schema=new_schema, last_column_id=new_schema.highest_field_id), + SetCurrentSchemaUpdate(schema_id=-1), + ], + ) + ) + + # Then + assert response.metadata.table_uuid == given_table.metadata.table_uuid + assert len(response.metadata.schemas) == 1 + assert response.metadata.schemas[0] == new_schema + + +def test_add_column(catalog: InMemoryCatalog) -> None: + given_table = given_catalog_has_a_table(catalog) + + given_table.update_schema().add_column(path="new_column1", field_type=IntegerType()).commit() + + assert given_table.schema() == Schema( + NestedField(field_id=1, name="x", field_type=LongType(), required=True), + NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"), + NestedField(field_id=3, name="z", field_type=LongType(), required=True), + NestedField(field_id=4, name="new_column1", field_type=IntegerType(), required=False), + schema_id=0, + identifier_field_ids=[], + ) + + transaction = given_table.transaction() + transaction.update_schema().add_column(path="new_column2", field_type=IntegerType(), doc="doc").commit() + transaction.commit_transaction() + + assert given_table.schema() == Schema( + NestedField(field_id=1, name="x", field_type=LongType(), required=True), + NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"), + NestedField(field_id=3, name="z", field_type=LongType(), required=True), + NestedField(field_id=4, name="new_column1", field_type=IntegerType(), required=False), + NestedField(field_id=5, name="new_column2", field_type=IntegerType(), required=False, doc="doc"), + schema_id=0, + identifier_field_ids=[], + ) + + +def test_add_column_with_statement(catalog: InMemoryCatalog) -> None: + given_table = given_catalog_has_a_table(catalog) + + with given_table.update_schema() as tx: + tx.add_column(path="new_column1", field_type=IntegerType()) + + assert given_table.schema() == Schema( + NestedField(field_id=1, name="x", field_type=LongType(), required=True), + NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"), + NestedField(field_id=3, name="z", field_type=LongType(), required=True), + NestedField(field_id=4, name="new_column1", field_type=IntegerType(), required=False), + schema_id=0, + identifier_field_ids=[], + ) + + with given_table.transaction() as tx: + tx.update_schema().add_column(path="new_column2", field_type=IntegerType(), doc="doc").commit() + + assert given_table.schema() == Schema( + NestedField(field_id=1, name="x", field_type=LongType(), required=True), + NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"), + NestedField(field_id=3, name="z", field_type=LongType(), required=True), + NestedField(field_id=4, name="new_column1", field_type=IntegerType(), required=False), + NestedField(field_id=5, name="new_column2", field_type=IntegerType(), required=False, doc="doc"), + schema_id=0, + identifier_field_ids=[], + ) + + +def test_catalog_repr(catalog: InMemoryCatalog) -> None: + s = repr(catalog) + assert s == "test.in.memory.catalog ()" diff --git a/tests/catalog/test_dynamodb.py b/tests/catalog/test_dynamodb.py new file mode 100644 index 0000000000..582cb034e8 --- /dev/null +++ b/tests/catalog/test_dynamodb.py @@ -0,0 +1,468 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from typing import List + +import pytest +from moto import mock_dynamodb + +from pyiceberg.catalog import METADATA_LOCATION, TABLE_TYPE +from pyiceberg.catalog.dynamodb import ( + DYNAMODB_COL_CREATED_AT, + DYNAMODB_COL_IDENTIFIER, + DYNAMODB_COL_NAMESPACE, + DYNAMODB_TABLE_NAME_DEFAULT, + DynamoDbCatalog, + _add_property_prefix, +) +from pyiceberg.exceptions import ( + NamespaceAlreadyExistsError, + NamespaceNotEmptyError, + NoSuchIcebergTableError, + NoSuchNamespaceError, + NoSuchPropertyException, + NoSuchTableError, + TableAlreadyExistsError, +) +from pyiceberg.schema import Schema +from tests.conftest import BUCKET_NAME, TABLE_METADATA_LOCATION_REGEX + + +@mock_dynamodb +def test_create_dynamodb_catalog_with_table_name(_dynamodb, _bucket_initialize: None, _patch_aiobotocore: None) -> None: # type: ignore + DynamoDbCatalog("test_ddb_catalog") + response = _dynamodb.describe_table(TableName=DYNAMODB_TABLE_NAME_DEFAULT) + assert response["Table"]["TableName"] == DYNAMODB_TABLE_NAME_DEFAULT + + custom_table_name = "custom_table_name" + DynamoDbCatalog("test_ddb_catalog", **{"table-name": custom_table_name}) + response = _dynamodb.describe_table(TableName=custom_table_name) + assert response["Table"]["TableName"] == custom_table_name + + +@mock_dynamodb +def test_create_table_with_database_location( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "test_ddb_catalog" + identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog(catalog_name, **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(namespace=database_name, properties={"location": f"s3://{BUCKET_NAME}/{database_name}.db"}) + table = test_catalog.create_table(identifier, table_schema_nested) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + + +@mock_dynamodb +def test_create_table_with_default_warehouse( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "test_ddb_catalog" + identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog( + catalog_name, **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}"} + ) + test_catalog.create_namespace(namespace=database_name) + table = test_catalog.create_table(identifier, table_schema_nested) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + + +@mock_dynamodb +def test_create_table_with_given_location( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "test_ddb_catalog" + 
identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog(catalog_name, **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(namespace=database_name) + table = test_catalog.create_table( + identifier=identifier, schema=table_schema_nested, location=f"s3://{BUCKET_NAME}/{database_name}.db/{table_name}" + ) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + + +@mock_dynamodb +def test_create_table_with_no_location( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog("test_ddb_catalog") + test_catalog.create_namespace(namespace=database_name) + with pytest.raises(ValueError): + test_catalog.create_table(identifier=identifier, schema=table_schema_nested) + + +@mock_dynamodb +def test_create_table_with_strips( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "test_ddb_catalog" + identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog(catalog_name, **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(namespace=database_name, properties={"location": f"s3://{BUCKET_NAME}/{database_name}.db/"}) + table = test_catalog.create_table(identifier, table_schema_nested) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + + +@mock_dynamodb +def test_create_table_with_strips_bucket_root( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "test_ddb_catalog" + identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog( + catalog_name, **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}/"} + ) + test_catalog.create_namespace(namespace=database_name) + table_strip = test_catalog.create_table(identifier, table_schema_nested) + assert table_strip.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table_strip.metadata_location) + + +@mock_dynamodb +def test_create_table_with_no_database( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog("test_ddb_catalog") + with pytest.raises(NoSuchNamespaceError): + test_catalog.create_table(identifier=identifier, schema=table_schema_nested) + + +@mock_dynamodb +def test_create_duplicated_table( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog( + "test_ddb_catalog", **{"warehouse": f"s3://{BUCKET_NAME}", "py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"} + ) + test_catalog.create_namespace(namespace=database_name) + test_catalog.create_table(identifier, table_schema_nested) + with pytest.raises(TableAlreadyExistsError): + test_catalog.create_table(identifier, table_schema_nested) + + +@mock_dynamodb +def test_load_table( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "test_ddb_catalog" + 
identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog( + catalog_name, **{"warehouse": f"s3://{BUCKET_NAME}", "py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"} + ) + test_catalog.create_namespace(namespace=database_name) + test_catalog.create_table(identifier, table_schema_nested) + table = test_catalog.load_table(identifier) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + + +@mock_dynamodb +def test_load_non_exist_table(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str, table_name: str) -> None: + identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog("test_ddb_catalog", warehouse=f"s3://{BUCKET_NAME}") + test_catalog.create_namespace(namespace=database_name) + with pytest.raises(NoSuchTableError): + test_catalog.load_table(identifier) + + +@mock_dynamodb +def test_drop_table( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "test_ddb_catalog" + identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog( + catalog_name, **{"warehouse": f"s3://{BUCKET_NAME}", "py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"} + ) + test_catalog.create_namespace(namespace=database_name) + test_catalog.create_table(identifier, table_schema_nested) + table = test_catalog.load_table(identifier) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + test_catalog.drop_table(identifier) + with pytest.raises(NoSuchTableError): + test_catalog.load_table(identifier) + + +@mock_dynamodb +def test_drop_non_exist_table(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str, table_name: str) -> None: + identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog("test_ddb_catalog", warehouse=f"s3://{BUCKET_NAME}") + with pytest.raises(NoSuchTableError): + test_catalog.drop_table(identifier) + + +@mock_dynamodb +def test_rename_table( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "test_ddb_catalog" + new_table_name = f"{table_name}_new" + identifier = (database_name, table_name) + new_identifier = (database_name, new_table_name) + test_catalog = DynamoDbCatalog( + catalog_name, **{"warehouse": f"s3://{BUCKET_NAME}", "py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"} + ) + test_catalog.create_namespace(namespace=database_name) + table = test_catalog.create_table(identifier, table_schema_nested) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + test_catalog.rename_table(identifier, new_identifier) + new_table = test_catalog.load_table(new_identifier) + assert new_table.identifier == (catalog_name,) + new_identifier + # the metadata_location should not change + assert new_table.metadata_location == table.metadata_location + # old table should be dropped + with pytest.raises(NoSuchTableError): + test_catalog.load_table(identifier) + + +@mock_dynamodb +def test_fail_on_rename_table_with_missing_required_params( + _bucket_initialize: None, _patch_aiobotocore: None, database_name: str, table_name: str +) -> None: + new_database_name = f"{database_name}_new" + new_table_name = f"{table_name}_new" + identifier = (database_name, table_name) + new_identifier = (new_database_name, new_table_name) + 
test_catalog = DynamoDbCatalog("test_ddb_catalog", warehouse=f"s3://{BUCKET_NAME}") + test_catalog.create_namespace(namespace=database_name) + test_catalog.create_namespace(namespace=new_database_name) + + # Missing required params + # pylint: disable=W0212 + test_catalog._put_dynamo_item( + item={ + DYNAMODB_COL_IDENTIFIER: {"S": f"{database_name}.{table_name}"}, + DYNAMODB_COL_NAMESPACE: {"S": database_name}, + }, + condition_expression=f"attribute_not_exists({DYNAMODB_COL_IDENTIFIER})", + ) + + with pytest.raises(NoSuchPropertyException): + test_catalog.rename_table(identifier, new_identifier) + + +@mock_dynamodb +def test_fail_on_rename_non_iceberg_table(_dynamodb, _bucket_initialize: None, _patch_aiobotocore: None, database_name: str, table_name: str) -> None: # type: ignore + new_database_name = f"{database_name}_new" + new_table_name = f"{table_name}_new" + identifier = (database_name, table_name) + new_identifier = (new_database_name, new_table_name) + test_catalog = DynamoDbCatalog("test_ddb_catalog", warehouse=f"s3://{BUCKET_NAME}") + test_catalog.create_namespace(namespace=database_name) + test_catalog.create_namespace(namespace=new_database_name) + + # Wrong TABLE_TYPE param + # pylint: disable=W0212 + test_catalog._put_dynamo_item( + item={ + DYNAMODB_COL_IDENTIFIER: {"S": f"{database_name}.{table_name}"}, + DYNAMODB_COL_NAMESPACE: {"S": database_name}, + DYNAMODB_COL_CREATED_AT: {"S": "test-1873287263487623"}, + _add_property_prefix(TABLE_TYPE): {"S": "non-iceberg-table-type"}, + _add_property_prefix(METADATA_LOCATION): {"S": "test-metadata-location"}, + }, + condition_expression=f"attribute_not_exists({DYNAMODB_COL_IDENTIFIER})", + ) + + with pytest.raises(NoSuchIcebergTableError): + test_catalog.rename_table(identifier, new_identifier) + + +@mock_dynamodb +def test_list_tables( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_list: List[str] +) -> None: + test_catalog = DynamoDbCatalog( + "test_ddb_catalog", **{"warehouse": f"s3://{BUCKET_NAME}", "py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"} + ) + test_catalog.create_namespace(namespace=database_name) + for table_name in table_list: + test_catalog.create_table((database_name, table_name), table_schema_nested) + loaded_table_list = test_catalog.list_tables(database_name) + for table_name in table_list: + assert (database_name, table_name) in loaded_table_list + + +@mock_dynamodb +def test_list_namespaces(_bucket_initialize: None, _patch_aiobotocore: None, database_list: List[str]) -> None: + test_catalog = DynamoDbCatalog("test_ddb_catalog") + for database_name in database_list: + test_catalog.create_namespace(namespace=database_name) + loaded_database_list = test_catalog.list_namespaces() + for database_name in database_list: + assert (database_name,) in loaded_database_list + + +@mock_dynamodb +def test_create_namespace_no_properties(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_catalog = DynamoDbCatalog("test_ddb_catalog") + test_catalog.create_namespace(namespace=database_name) + loaded_database_list = test_catalog.list_namespaces() + assert len(loaded_database_list) == 1 + assert (database_name,) in loaded_database_list + properties = test_catalog.load_namespace_properties(database_name) + assert properties == {} + + +@mock_dynamodb +def test_create_namespace_with_comment_and_location( + _bucket_initialize: None, _patch_aiobotocore: None, database_name: str +) -> None: + test_location = 
f"s3://{BUCKET_NAME}/{database_name}.db" + test_properties = { + "comment": "this is a test description", + "location": test_location, + } + test_catalog = DynamoDbCatalog("test_ddb_catalog") + test_catalog.create_namespace(namespace=database_name, properties=test_properties) + loaded_database_list = test_catalog.list_namespaces() + assert len(loaded_database_list) == 1 + assert (database_name,) in loaded_database_list + properties = test_catalog.load_namespace_properties(database_name) + assert properties["comment"] == "this is a test description" + assert properties["location"] == test_location + + +@mock_dynamodb +def test_create_duplicated_namespace(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_catalog = DynamoDbCatalog("test_ddb_catalog") + test_catalog.create_namespace(namespace=database_name) + loaded_database_list = test_catalog.list_namespaces() + assert len(loaded_database_list) == 1 + assert (database_name,) in loaded_database_list + with pytest.raises(NamespaceAlreadyExistsError): + test_catalog.create_namespace(namespace=database_name, properties={"test": "test"}) + + +@mock_dynamodb +def test_drop_namespace(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_catalog = DynamoDbCatalog("test_ddb_catalog") + test_catalog.create_namespace(namespace=database_name) + loaded_database_list = test_catalog.list_namespaces() + assert len(loaded_database_list) == 1 + assert (database_name,) in loaded_database_list + test_catalog.drop_namespace(database_name) + loaded_database_list = test_catalog.list_namespaces() + assert len(loaded_database_list) == 0 + + +@mock_dynamodb +def test_drop_non_empty_namespace( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog = DynamoDbCatalog( + "test_ddb_catalog", **{"warehouse": f"s3://{BUCKET_NAME}", "py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"} + ) + test_catalog.create_namespace(namespace=database_name) + test_catalog.create_table(identifier, table_schema_nested) + assert len(test_catalog.list_tables(database_name)) == 1 + with pytest.raises(NamespaceNotEmptyError): + test_catalog.drop_namespace(database_name) + + +@mock_dynamodb +def test_drop_non_exist_namespace(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_catalog = DynamoDbCatalog("test_ddb_catalog") + with pytest.raises(NoSuchNamespaceError): + test_catalog.drop_namespace(database_name) + + +@mock_dynamodb +def test_load_namespace_properties(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_location = f"s3://{BUCKET_NAME}/{database_name}.db" + test_properties = { + "comment": "this is a test description", + "location": test_location, + "test_property1": "1", + "test_property2": "2", + "test_property3": "3", + } + test_catalog = DynamoDbCatalog("test_ddb_catalog") + test_catalog.create_namespace(database_name, test_properties) + listed_properties = test_catalog.load_namespace_properties(database_name) + for k, v in listed_properties.items(): + assert k in test_properties + assert v == test_properties[k] + + +@mock_dynamodb +def test_load_non_exist_namespace_properties(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_catalog = DynamoDbCatalog("test_ddb_catalog") + with pytest.raises(NoSuchNamespaceError): + test_catalog.load_namespace_properties(database_name) + + 
+@mock_dynamodb +def test_update_namespace_properties(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_properties = { + "comment": "this is a test description", + "location": f"s3://{BUCKET_NAME}/{database_name}.db", + "test_property1": "1", + "test_property2": "2", + "test_property3": "3", + } + removals = {"test_property1", "test_property2", "test_property3", "should_not_removed"} + updates = {"test_property4": "4", "test_property5": "5", "comment": "updated test description"} + test_catalog = DynamoDbCatalog("test_ddb_catalog") + test_catalog.create_namespace(database_name, test_properties) + update_report = test_catalog.update_namespace_properties(database_name, removals, updates) + for k in updates.keys(): + assert k in update_report.updated + for k in removals: + if k == "should_not_removed": + assert k in update_report.missing + else: + assert k in update_report.removed + assert "updated test description" == test_catalog.load_namespace_properties(database_name)["comment"] + test_catalog.drop_namespace(database_name) + + +@mock_dynamodb +def test_load_empty_namespace_properties(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_catalog = DynamoDbCatalog("test_ddb_catalog") + test_catalog.create_namespace(database_name) + listed_properties = test_catalog.load_namespace_properties(database_name) + assert listed_properties == {} + + +@mock_dynamodb +def test_update_namespace_properties_overlap_update_removal( + _bucket_initialize: None, _patch_aiobotocore: None, database_name: str +) -> None: + test_properties = { + "comment": "this is a test description", + "location": f"s3://{BUCKET_NAME}/{database_name}.db", + "test_property1": "1", + "test_property2": "2", + "test_property3": "3", + } + removals = {"test_property1", "test_property2", "test_property3", "should_not_removed"} + updates = {"test_property1": "4", "test_property5": "5", "comment": "updated test description"} + test_catalog = DynamoDbCatalog("test_ddb_catalog") + test_catalog.create_namespace(database_name, test_properties) + with pytest.raises(ValueError): + test_catalog.update_namespace_properties(database_name, removals, updates) + # should not modify the properties + assert test_catalog.load_namespace_properties(database_name) == test_properties diff --git a/tests/catalog/test_glue.py b/tests/catalog/test_glue.py new file mode 100644 index 0000000000..1d7027a216 --- /dev/null +++ b/tests/catalog/test_glue.py @@ -0,0 +1,459 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
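+# Unit tests for GlueCatalog. These run against moto's mock_glue backend, so
+# no AWS account is needed; the tests that talk to a real Glue endpoint live
+# in tests/catalog/integration_test_glue.py.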
+from typing import Any, Dict, List +from unittest import mock + +import pytest +from moto import mock_glue + +from pyiceberg.catalog.glue import GlueCatalog +from pyiceberg.exceptions import ( + NamespaceAlreadyExistsError, + NamespaceNotEmptyError, + NoSuchIcebergTableError, + NoSuchNamespaceError, + NoSuchPropertyException, + NoSuchTableError, + TableAlreadyExistsError, +) +from pyiceberg.schema import Schema +from tests.conftest import BUCKET_NAME, TABLE_METADATA_LOCATION_REGEX + + +@mock_glue +def test_create_table_with_database_location( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "glue" + identifier = (database_name, table_name) + test_catalog = GlueCatalog(catalog_name, **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(namespace=database_name, properties={"location": f"s3://{BUCKET_NAME}/{database_name}.db"}) + table = test_catalog.create_table(identifier, table_schema_nested) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + + +@mock_glue +def test_create_table_with_default_warehouse( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "glue" + identifier = (database_name, table_name) + test_catalog = GlueCatalog( + catalog_name, **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}"} + ) + test_catalog.create_namespace(namespace=database_name) + table = test_catalog.create_table(identifier, table_schema_nested) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + + +@mock_glue +def test_create_table_with_given_location( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "glue" + identifier = (database_name, table_name) + test_catalog = GlueCatalog(catalog_name, **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(namespace=database_name) + table = test_catalog.create_table( + identifier=identifier, schema=table_schema_nested, location=f"s3://{BUCKET_NAME}/{database_name}.db/{table_name}" + ) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + + +@mock_glue +def test_create_table_with_no_location( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "glue" + identifier = (database_name, table_name) + test_catalog = GlueCatalog(catalog_name, **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(namespace=database_name) + with pytest.raises(ValueError): + test_catalog.create_table(identifier=identifier, schema=table_schema_nested) + + +@mock_glue +def test_create_table_with_strips( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "glue" + identifier = (database_name, table_name) + test_catalog = GlueCatalog(catalog_name, **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(namespace=database_name, properties={"location": f"s3://{BUCKET_NAME}/{database_name}.db/"}) + table = 
test_catalog.create_table(identifier, table_schema_nested) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + + +@mock_glue +def test_create_table_with_strips_bucket_root( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "glue" + identifier = (database_name, table_name) + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}/"}) + test_catalog.create_namespace(namespace=database_name) + table_strip = test_catalog.create_table(identifier, table_schema_nested) + assert table_strip.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table_strip.metadata_location) + + +@mock_glue +def test_create_table_with_no_database( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + with pytest.raises(NoSuchNamespaceError): + test_catalog.create_table(identifier=identifier, schema=table_schema_nested) + + +@mock_glue +def test_create_duplicated_table( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}/"}) + test_catalog.create_namespace(namespace=database_name) + test_catalog.create_table(identifier, table_schema_nested) + with pytest.raises(TableAlreadyExistsError): + test_catalog.create_table(identifier, table_schema_nested) + + +@mock_glue +def test_load_table( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "glue" + identifier = (database_name, table_name) + test_catalog = GlueCatalog( + catalog_name, **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}/"} + ) + test_catalog.create_namespace(namespace=database_name) + test_catalog.create_table(identifier, table_schema_nested) + table = test_catalog.load_table(identifier) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + + +@mock_glue +def test_load_non_exist_table(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str, table_name: str) -> None: + identifier = (database_name, table_name) + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}/"}) + test_catalog.create_namespace(namespace=database_name) + with pytest.raises(NoSuchTableError): + test_catalog.load_table(identifier) + + +@mock_glue +def test_drop_table( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "glue" + identifier = (database_name, table_name) + test_catalog = GlueCatalog( + catalog_name, **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}/"} + ) + test_catalog.create_namespace(namespace=database_name) + test_catalog.create_table(identifier, table_schema_nested) + table = test_catalog.load_table(identifier) + assert 
table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + test_catalog.drop_table(identifier) + with pytest.raises(NoSuchTableError): + test_catalog.load_table(identifier) + + +@mock_glue +def test_drop_non_exist_table(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str, table_name: str) -> None: + identifier = (database_name, table_name) + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}/"}) + with pytest.raises(NoSuchTableError): + test_catalog.drop_table(identifier) + + +@mock_glue +def test_rename_table( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + catalog_name = "glue" + new_table_name = f"{table_name}_new" + identifier = (database_name, table_name) + new_identifier = (database_name, new_table_name) + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}/"}) + test_catalog.create_namespace(namespace=database_name) + table = test_catalog.create_table(identifier, table_schema_nested) + assert table.identifier == (catalog_name,) + identifier + assert TABLE_METADATA_LOCATION_REGEX.match(table.metadata_location) + test_catalog.rename_table(identifier, new_identifier) + new_table = test_catalog.load_table(new_identifier) + assert new_table.identifier == (catalog_name,) + new_identifier + # the metadata_location should not change + assert new_table.metadata_location == table.metadata_location + # old table should be dropped + with pytest.raises(NoSuchTableError): + test_catalog.load_table(identifier) + + +@mock_glue +def test_rename_table_no_params(_glue, _bucket_initialize: None, _patch_aiobotocore: None, database_name: str, table_name: str) -> None: # type: ignore + new_database_name = f"{database_name}_new" + new_table_name = f"{table_name}_new" + identifier = (database_name, table_name) + new_identifier = (new_database_name, new_table_name) + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}/"}) + test_catalog.create_namespace(namespace=database_name) + test_catalog.create_namespace(namespace=new_database_name) + _glue.create_table( + DatabaseName=database_name, + TableInput={"Name": table_name, "TableType": "EXTERNAL_TABLE", "Parameters": {"table_type": "iceberg"}}, + ) + with pytest.raises(NoSuchPropertyException): + test_catalog.rename_table(identifier, new_identifier) + + +@mock_glue +def test_rename_non_iceberg_table(_glue, _bucket_initialize: None, _patch_aiobotocore: None, database_name: str, table_name: str) -> None: # type: ignore + new_database_name = f"{database_name}_new" + new_table_name = f"{table_name}_new" + identifier = (database_name, table_name) + new_identifier = (new_database_name, new_table_name) + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}/"}) + test_catalog.create_namespace(namespace=database_name) + test_catalog.create_namespace(namespace=new_database_name) + _glue.create_table( + DatabaseName=database_name, + TableInput={ + "Name": table_name, + "TableType": "EXTERNAL_TABLE", + "Parameters": {"table_type": "noniceberg", "metadata_location": "test"}, + }, + ) + with pytest.raises(NoSuchIcebergTableError): + test_catalog.rename_table(identifier, new_identifier) + + +@mock_glue +def test_list_tables( + 
_bucket_initialize: None, + _patch_aiobotocore: None, + table_schema_nested: Schema, + database_name: str, + table_name: str, + table_list: List[str], +) -> None: + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}/"}) + test_catalog.create_namespace(namespace=database_name) + for table_name in table_list: + test_catalog.create_table((database_name, table_name), table_schema_nested) + loaded_table_list = test_catalog.list_tables(database_name) + for table_name in table_list: + assert (database_name, table_name) in loaded_table_list + + +@mock_glue +def test_list_namespaces(_bucket_initialize: None, _patch_aiobotocore: None, database_list: List[str]) -> None: + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + for database_name in database_list: + test_catalog.create_namespace(namespace=database_name) + loaded_database_list = test_catalog.list_namespaces() + for database_name in database_list: + assert (database_name,) in loaded_database_list + + +@mock_glue +def test_create_namespace_no_properties(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(namespace=database_name) + loaded_database_list = test_catalog.list_namespaces() + assert len(loaded_database_list) == 1 + assert (database_name,) in loaded_database_list + properties = test_catalog.load_namespace_properties(database_name) + assert properties == {} + + +@mock_glue +def test_create_namespace_with_comment_and_location( + _bucket_initialize: None, _patch_aiobotocore: None, database_name: str +) -> None: + test_location = f"s3://{BUCKET_NAME}/{database_name}.db" + test_properties = { + "comment": "this is a test description", + "location": test_location, + } + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(namespace=database_name, properties=test_properties) + loaded_database_list = test_catalog.list_namespaces() + assert len(loaded_database_list) == 1 + assert (database_name,) in loaded_database_list + properties = test_catalog.load_namespace_properties(database_name) + assert properties["comment"] == "this is a test description" + assert properties["location"] == test_location + + +@mock_glue +def test_create_duplicated_namespace(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(namespace=database_name) + loaded_database_list = test_catalog.list_namespaces() + assert len(loaded_database_list) == 1 + assert (database_name,) in loaded_database_list + with pytest.raises(NamespaceAlreadyExistsError): + test_catalog.create_namespace(namespace=database_name, properties={"test": "test"}) + + +@mock_glue +def test_drop_namespace(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(namespace=database_name) + loaded_database_list = test_catalog.list_namespaces() + assert len(loaded_database_list) == 1 + assert (database_name,) in loaded_database_list + test_catalog.drop_namespace(database_name) + loaded_database_list = test_catalog.list_namespaces() + assert len(loaded_database_list) == 0 + + +@mock_glue +def 
test_drop_non_empty_namespace( + _bucket_initialize: None, _patch_aiobotocore: None, table_schema_nested: Schema, database_name: str, table_name: str +) -> None: + identifier = (database_name, table_name) + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO", "warehouse": f"s3://{BUCKET_NAME}/"}) + test_catalog.create_namespace(namespace=database_name) + test_catalog.create_table(identifier, table_schema_nested) + assert len(test_catalog.list_tables(database_name)) == 1 + with pytest.raises(NamespaceNotEmptyError): + test_catalog.drop_namespace(database_name) + + +@mock_glue +def test_drop_non_exist_namespace(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + with pytest.raises(NoSuchNamespaceError): + test_catalog.drop_namespace(database_name) + + +@mock_glue +def test_load_namespace_properties(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_location = f"s3://{BUCKET_NAME}/{database_name}.db" + test_properties = { + "comment": "this is a test description", + "location": test_location, + "test_property1": "1", + "test_property2": "2", + "test_property3": "3", + } + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(database_name, test_properties) + listed_properties = test_catalog.load_namespace_properties(database_name) + for k, v in listed_properties.items(): + assert k in test_properties + assert v == test_properties[k] + + +@mock_glue +def test_load_non_exist_namespace_properties(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + with pytest.raises(NoSuchNamespaceError): + test_catalog.load_namespace_properties(database_name) + + +@mock_glue +def test_update_namespace_properties(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_properties = { + "comment": "this is a test description", + "location": f"s3://{BUCKET_NAME}/{database_name}.db", + "test_property1": "1", + "test_property2": "2", + "test_property3": "3", + } + removals = {"test_property1", "test_property2", "test_property3", "should_not_removed"} + updates = {"test_property4": "4", "test_property5": "5", "comment": "updated test description"} + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(database_name, test_properties) + update_report = test_catalog.update_namespace_properties(database_name, removals, updates) + for k in updates.keys(): + assert k in update_report.updated + for k in removals: + if k == "should_not_removed": + assert k in update_report.missing + else: + assert k in update_report.removed + assert "updated test description" == test_catalog.load_namespace_properties(database_name)["comment"] + test_catalog.drop_namespace(database_name) + + +@mock_glue +def test_load_empty_namespace_properties(_bucket_initialize: None, _patch_aiobotocore: None, database_name: str) -> None: + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(database_name) + listed_properties = test_catalog.load_namespace_properties(database_name) + assert listed_properties == {} + + +@mock_glue +def test_load_default_namespace_properties(_glue, _bucket_initialize, 
_patch_aiobotocore, database_name: str) -> None: # type: ignore + # simulate creating database with default settings through AWS Glue Web Console + _glue.create_database(DatabaseInput={"Name": database_name}) + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + listed_properties = test_catalog.load_namespace_properties(database_name) + assert listed_properties == {} + + +@mock_glue +def test_update_namespace_properties_overlap_update_removal( + _bucket_initialize: None, _patch_aiobotocore: None, database_name: str +) -> None: + test_properties = { + "comment": "this is a test description", + "location": f"s3://{BUCKET_NAME}/{database_name}.db", + "test_property1": "1", + "test_property2": "2", + "test_property3": "3", + } + removals = {"test_property1", "test_property2", "test_property3", "should_not_removed"} + updates = {"test_property1": "4", "test_property5": "5", "comment": "updated test description"} + test_catalog = GlueCatalog("glue", **{"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}) + test_catalog.create_namespace(database_name, test_properties) + with pytest.raises(ValueError): + test_catalog.update_namespace_properties(database_name, removals, updates) + # should not modify the properties + assert test_catalog.load_namespace_properties(database_name) == test_properties + + +@mock_glue +def test_passing_profile_name() -> None: + session_properties: Dict[str, Any] = { + "aws_access_key_id": "abc", + "aws_secret_access_key": "def", + "aws_session_token": "ghi", + "region_name": "eu-central-1", + "profile_name": "sandbox", + "botocore_session": None, + } + test_properties = {"type": "glue"} + test_properties.update(session_properties) + + with mock.patch("boto3.Session") as mock_session: + test_catalog = GlueCatalog("glue", **test_properties) + + mock_session.assert_called_with(**session_properties) + assert test_catalog.glue is mock_session().client() diff --git a/tests/catalog/test_hive.py b/tests/catalog/test_hive.py new file mode 100644 index 0000000000..9e3dd1a945 --- /dev/null +++ b/tests/catalog/test_hive.py @@ -0,0 +1,698 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
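+
+# The tests below never reach a real Hive metastore: HIVE_METASTORE_FAKE_URL
+# points at an unreachable host by design, and each test swaps the Thrift
+# client for a MagicMock. A minimal sketch of the pattern, using the names
+# defined in this module:
+#
+#     catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL)
+#     catalog._client = MagicMock()
+#     catalog._client.__enter__().get_all_tables.return_value = ["t1", "t2"]
+#     assert catalog.list_tables("db") == [("db", "t1"), ("db", "t2")]
+#
+# `__enter__()` is what gets stubbed because the catalog uses the client as a
+# context manager around every metastore call.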
+# pylint: disable=protected-access,redefined-outer-name +import uuid +from unittest.mock import MagicMock, patch + +import pytest +from hive_metastore.ttypes import ( + AlreadyExistsException, + FieldSchema, + InvalidOperationException, + MetaException, + NoSuchObjectException, + SerDeInfo, + SkewedInfo, + StorageDescriptor, +) +from hive_metastore.ttypes import Database as HiveDatabase +from hive_metastore.ttypes import Table as HiveTable + +from pyiceberg.catalog import PropertiesUpdateSummary +from pyiceberg.catalog.hive import HiveCatalog, _construct_hive_storage_descriptor +from pyiceberg.exceptions import ( + NamespaceAlreadyExistsError, + NamespaceNotEmptyError, + NoSuchNamespaceError, + NoSuchTableError, +) +from pyiceberg.partitioning import PartitionField, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.table.metadata import TableMetadataUtil, TableMetadataV2 +from pyiceberg.table.refs import SnapshotRef, SnapshotRefType +from pyiceberg.table.snapshots import ( + MetadataLogEntry, + Operation, + Snapshot, + SnapshotLogEntry, + Summary, +) +from pyiceberg.table.sorting import ( + NullOrder, + SortDirection, + SortField, + SortOrder, +) +from pyiceberg.transforms import BucketTransform, IdentityTransform +from pyiceberg.types import ( + BooleanType, + IntegerType, + LongType, + NestedField, + StringType, +) + +HIVE_CATALOG_NAME = "hive" +HIVE_METASTORE_FAKE_URL = "thrift://unknown:9083" + + +@pytest.fixture +def hive_table(metadata_location: str) -> HiveTable: + return HiveTable( + tableName="new_tabl2e", + dbName="default", + owner="fokkodriesprong", + createTime=1659092339, + lastAccessTime=1659092, + retention=0, + sd=StorageDescriptor( + cols=[ + FieldSchema(name="foo", type="string", comment=None), + FieldSchema(name="bar", type="int", comment=None), + FieldSchema(name="baz", type="boolean", comment=None), + ], + location="file:/tmp/new_tabl2e", + inputFormat="org.apache.hadoop.mapred.FileInputFormat", + outputFormat="org.apache.hadoop.mapred.FileOutputFormat", + compressed=False, + numBuckets=0, + serdeInfo=SerDeInfo( + name=None, + serializationLib="org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", + parameters={}, + description=None, + serializerClass=None, + deserializerClass=None, + serdeType=None, + ), + bucketCols=[], + sortCols=[], + parameters={}, + skewedInfo=SkewedInfo(skewedColNames=[], skewedColValues=[], skewedColValueLocationMaps={}), + storedAsSubDirectories=False, + ), + partitionKeys=[], + parameters={ + "EXTERNAL": "TRUE", + "transient_lastDdlTime": "1659092339", + "table_type": "ICEBERG", + "metadata_location": metadata_location, + }, + viewOriginalText=None, + viewExpandedText=None, + tableType="EXTERNAL_TABLE", + privileges=None, + temporary=False, + rewriteEnabled=False, + creationMetadata=None, + catName="hive", + ownerType=1, + writeId=-1, + isStatsCompliant=None, + colStats=None, + accessType=None, + requiredReadCapabilities=None, + requiredWriteCapabilities=None, + id=None, + fileMetadata=None, + dictionary=None, + txnId=None, + ) + + +@pytest.fixture(scope="session") +def hive_database(tmp_path_factory: pytest.TempPathFactory) -> HiveDatabase: + # Pre-create the directory, this has to be done because + # of a local FS. Not needed with an actual object store. 
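+    # (Writing table metadata to a plain local path assumes the parent
+    # directories already exist, whereas an object store has no real
+    # directories, so nothing would need to be created there.)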
+ database_path = tmp_path_factory.mktemp("database") + manifest_path = database_path / "database" / "table" / "metadata" + manifest_path.mkdir(parents=True) + return HiveDatabase( + name="default", + description=None, + locationUri=str(database_path / "database"), + parameters={"test": "property"}, + privileges=None, + ownerName=None, + ownerType=1, + catalogName="hive", + createTime=None, + managedLocationUri=None, + type=None, + connector_name=None, + remote_dbname=None, + ) + + +def test_no_uri_supplied() -> None: + with pytest.raises(KeyError): + HiveCatalog("production") + + +def test_check_number_of_namespaces(table_schema_simple: Schema) -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + with pytest.raises(ValueError): + catalog.create_table(("default", "namespace", "table"), schema=table_schema_simple) + + with pytest.raises(ValueError): + catalog.create_table("default.namespace.table", schema=table_schema_simple) + + with pytest.raises(ValueError): + catalog.create_table(("table",), schema=table_schema_simple) + + with pytest.raises(ValueError): + catalog.create_table("table", schema=table_schema_simple) + + +@patch("time.time", MagicMock(return_value=12345)) +def test_create_table(table_schema_simple: Schema, hive_database: HiveDatabase, hive_table: HiveTable) -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().create_table.return_value = None + catalog._client.__enter__().get_table.return_value = hive_table + catalog._client.__enter__().get_database.return_value = hive_database + catalog.create_table(("default", "table"), schema=table_schema_simple, properties={"owner": "javaberg"}) + + called_hive_table: HiveTable = catalog._client.__enter__().create_table.call_args[0][0] + # This one is generated within the function itself, so we need to extract + # it to construct the assert_called_with + metadata_location: str = called_hive_table.parameters["metadata_location"] + assert metadata_location.endswith(".metadata.json") + assert "/database/table/metadata/" in metadata_location + catalog._client.__enter__().create_table.assert_called_with( + HiveTable( + tableName="table", + dbName="default", + owner="javaberg", + createTime=12345, + lastAccessTime=12345, + retention=None, + sd=StorageDescriptor( + cols=[ + FieldSchema(name="foo", type="string", comment=None), + FieldSchema(name="bar", type="int", comment=None), + FieldSchema(name="baz", type="boolean", comment=None), + ], + location=f"{hive_database.locationUri}/table", + inputFormat="org.apache.hadoop.mapred.FileInputFormat", + outputFormat="org.apache.hadoop.mapred.FileOutputFormat", + compressed=None, + numBuckets=None, + serdeInfo=SerDeInfo( + name=None, + serializationLib="org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", + parameters=None, + description=None, + serializerClass=None, + deserializerClass=None, + serdeType=None, + ), + bucketCols=None, + sortCols=None, + parameters=None, + skewedInfo=None, + storedAsSubDirectories=None, + ), + partitionKeys=None, + parameters={"EXTERNAL": "TRUE", "table_type": "ICEBERG", "metadata_location": metadata_location}, + viewOriginalText=None, + viewExpandedText=None, + tableType="EXTERNAL_TABLE", + privileges=None, + temporary=False, + rewriteEnabled=None, + creationMetadata=None, + catName=None, + ownerType=1, + writeId=-1, + isStatsCompliant=None, + colStats=None, + accessType=None, + requiredReadCapabilities=None, + requiredWriteCapabilities=None, + 
id=None, + fileMetadata=None, + dictionary=None, + txnId=None, + ) + ) + + with open(metadata_location, encoding="utf-8") as f: + payload = f.read() + + metadata = TableMetadataUtil.parse_raw(payload) + + assert "database/table" in metadata.location + + expected = TableMetadataV2( + location=metadata.location, + table_uuid=metadata.table_uuid, + last_updated_ms=metadata.last_updated_ms, + last_column_id=3, + schemas=[ + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + schema_id=0, + identifier_field_ids=[2], + ) + ], + current_schema_id=0, + last_partition_id=1000, + properties={"owner": "javaberg"}, + partition_specs=[PartitionSpec()], + default_spec_id=0, + current_snapshot_id=None, + snapshots=[], + snapshot_log=[], + metadata_log=[], + sort_orders=[SortOrder(order_id=0)], + default_sort_order_id=0, + refs={}, + format_version=2, + last_sequence_number=0, + ) + + assert metadata.model_dump() == expected.model_dump() + + +def test_load_table(hive_table: HiveTable) -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().get_table.return_value = hive_table + table = catalog.load_table(("default", "new_tabl2e")) + + catalog._client.__enter__().get_table.assert_called_with(dbname="default", tbl_name="new_tabl2e") + + expected = TableMetadataV2( + location="s3://bucket/test/location", + table_uuid=uuid.UUID("9c12d441-03fe-4693-9a96-a0705ddf69c1"), + last_updated_ms=1602638573590, + last_column_id=3, + schemas=[ + Schema( + NestedField(field_id=1, name="x", field_type=LongType(), required=True), + schema_id=0, + identifier_field_ids=[], + ), + Schema( + NestedField(field_id=1, name="x", field_type=LongType(), required=True), + NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"), + NestedField(field_id=3, name="z", field_type=LongType(), required=True), + schema_id=1, + identifier_field_ids=[1, 2], + ), + ], + current_schema_id=1, + partition_specs=[ + PartitionSpec(PartitionField(source_id=1, field_id=1000, transform=IdentityTransform(), name="x"), spec_id=0) + ], + default_spec_id=0, + last_partition_id=1000, + properties={"read.split.target.size": "134217728"}, + current_snapshot_id=3055729675574597004, + snapshots=[ + Snapshot( + snapshot_id=3051729675574597004, + parent_snapshot_id=None, + sequence_number=0, + timestamp_ms=1515100955770, + manifest_list="s3://a/b/1.avro", + summary=Summary(operation=Operation.APPEND), + schema_id=None, + ), + Snapshot( + snapshot_id=3055729675574597004, + parent_snapshot_id=3051729675574597004, + sequence_number=1, + timestamp_ms=1555100955770, + manifest_list="s3://a/b/2.avro", + summary=Summary(operation=Operation.APPEND), + schema_id=1, + ), + ], + snapshot_log=[ + SnapshotLogEntry(snapshot_id=3051729675574597004, timestamp_ms=1515100955770), + SnapshotLogEntry(snapshot_id=3055729675574597004, timestamp_ms=1555100955770), + ], + metadata_log=[MetadataLogEntry(metadata_file="s3://bucket/.../v1.json", timestamp_ms=1515100)], + sort_orders=[ + SortOrder( + SortField( + source_id=2, transform=IdentityTransform(), direction=SortDirection.ASC, null_order=NullOrder.NULLS_FIRST + ), + SortField( + source_id=3, + transform=BucketTransform(num_buckets=4), + direction=SortDirection.DESC, + null_order=NullOrder.NULLS_LAST, + ), + order_id=3, + ) 
+ ], + default_sort_order_id=3, + refs={ + "test": SnapshotRef( + snapshot_id=3051729675574597004, + snapshot_ref_type=SnapshotRefType.TAG, + min_snapshots_to_keep=None, + max_snapshot_age_ms=None, + max_ref_age_ms=10000000, + ), + "main": SnapshotRef( + snapshot_id=3055729675574597004, + snapshot_ref_type=SnapshotRefType.BRANCH, + min_snapshots_to_keep=None, + max_snapshot_age_ms=None, + max_ref_age_ms=None, + ), + }, + format_version=2, + last_sequence_number=34, + ) + + assert table.identifier == (HIVE_CATALOG_NAME, "default", "new_tabl2e") + assert expected == table.metadata + + +def test_rename_table_from_does_not_exists() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().alter_table.side_effect = NoSuchObjectException( + message="hive.default.does_not_exists table not found" + ) + + with pytest.raises(NoSuchTableError) as exc_info: + catalog.rename_table(("default", "does_not_exists"), ("default", "new_table")) + + assert "Table does not exist: does_not_exists" in str(exc_info.value) + + +def test_rename_table_to_namespace_does_not_exists() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().alter_table.side_effect = InvalidOperationException( + message="Unable to change partition or table. Database default does not exist Check metastore logs for detailed stack.does_not_exists" + ) + + with pytest.raises(NoSuchNamespaceError) as exc_info: + catalog.rename_table(("default", "does_exists"), ("default_does_not_exists", "new_table")) + + assert "Database does not exists: default_does_not_exists" in str(exc_info.value) + + +def test_drop_database_does_not_empty() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().drop_database.side_effect = InvalidOperationException( + message="Database not_empty is not empty. One or more tables exist." 
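+        # HiveCatalog is expected to translate this Thrift
+        # InvalidOperationException into the NamespaceNotEmptyError
+        # asserted below.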
+ ) + + with pytest.raises(NamespaceNotEmptyError) as exc_info: + catalog.drop_namespace(("not_empty",)) + + assert "Database not_empty is not empty" in str(exc_info.value) + + +def test_drop_database_does_not_exists() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().drop_database.side_effect = MetaException(message="java.lang.NullPointerException") + + with pytest.raises(NoSuchNamespaceError) as exc_info: + catalog.drop_namespace(("does_not_exists",)) + + assert "Database does not exists: does_not_exists" in str(exc_info.value) + + +def test_list_tables() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().get_all_tables.return_value = ["table1", "table2"] + + assert catalog.list_tables("database") == [ + ( + "database", + "table1", + ), + ( + "database", + "table2", + ), + ] + catalog._client.__enter__().get_all_tables.assert_called_with(db_name="database") + + +def test_list_namespaces() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().get_all_databases.return_value = ["namespace1", "namespace2"] + + assert catalog.list_namespaces() == [("namespace1",), ("namespace2",)] + + catalog._client.__enter__().get_all_databases.assert_called() + + +def test_drop_table() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().get_all_databases.return_value = ["namespace1", "namespace2"] + + catalog.drop_table(("default", "table")) + + catalog._client.__enter__().drop_table.assert_called_with(dbname="default", name="table", deleteData=False) + + +def test_drop_table_does_not_exists() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().drop_table.side_effect = NoSuchObjectException(message="does_not_exists") + + with pytest.raises(NoSuchTableError) as exc_info: + catalog.drop_table(("default", "does_not_exists")) + + assert "Table does not exists: does_not_exists" in str(exc_info.value) + + +def test_purge_table() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + with pytest.raises(NotImplementedError): + catalog.purge_table(("default", "does_not_exists")) + + +def test_create_database() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().create_database.return_value = None + + catalog.create_namespace("default", {"property": "true"}) + + catalog._client.__enter__().create_database.assert_called_with( + HiveDatabase( + name="default", + description=None, + locationUri=None, + parameters={"property": "true"}, + privileges=None, + ownerName=None, + ownerType=None, + catalogName=None, + createTime=None, + managedLocationUri=None, + type=None, + connector_name=None, + remote_dbname=None, + ) + ) + + +def test_create_database_already_exists() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().create_database.side_effect = AlreadyExistsException(message="Database default already exists") + + with pytest.raises(NamespaceAlreadyExistsError) as exc_info: + catalog.create_namespace("default") + + assert "Database default already 
exists" in str(exc_info.value) + + +def test_load_namespace_properties(hive_database: HiveDatabase) -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().get_database.return_value = hive_database + + assert catalog.load_namespace_properties("default2") == {"location": hive_database.locationUri, "test": "property"} + + catalog._client.__enter__().get_database.assert_called_with(name="default2") + + +def test_load_namespace_properties_does_not_exists() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().get_database.side_effect = NoSuchObjectException(message="does_not_exists") + + with pytest.raises(NoSuchNamespaceError) as exc_info: + catalog.load_namespace_properties(("does_not_exists",)) + + assert "Database does not exists: does_not_exists" in str(exc_info.value) + + +def test_update_namespace_properties(hive_database: HiveDatabase) -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().get_database.return_value = hive_database + catalog._client.__enter__().alter_database.return_value = None + + assert catalog.update_namespace_properties( + namespace="default", removals={"test", "does_not_exists"}, updates={"label": "core"} + ) == PropertiesUpdateSummary(removed=["test"], updated=["label"], missing=["does_not_exists"]) + + catalog._client.__enter__().alter_database.assert_called_with( + "default", + HiveDatabase( + name="default", + description=None, + locationUri=hive_database.locationUri, + parameters={"test": None, "label": "core"}, + privileges=None, + ownerName=None, + ownerType=1, + catalogName="hive", + createTime=None, + managedLocationUri=None, + type=None, + connector_name=None, + remote_dbname=None, + ), + ) + + +def test_update_namespace_properties_namespace_does_not_exists() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + catalog._client = MagicMock() + catalog._client.__enter__().get_database.side_effect = NoSuchObjectException(message="does_not_exists") + + with pytest.raises(NoSuchNamespaceError) as exc_info: + catalog.update_namespace_properties(("does_not_exists",), removals=set(), updates={}) + + assert "Database does not exists: does_not_exists" in str(exc_info.value) + + +def test_update_namespace_properties_overlap() -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, uri=HIVE_METASTORE_FAKE_URL) + + with pytest.raises(ValueError) as exc_info: + catalog.update_namespace_properties(("table",), removals=set("a"), updates={"a": "b"}) + + assert "Updates and deletes have an overlap: {'a'}" in str(exc_info.value) + + +def test_construct_hive_storage_descriptor_simple(table_schema_simple: Schema) -> None: + descriptor = _construct_hive_storage_descriptor(table_schema_simple, "s3://") + assert descriptor == StorageDescriptor( + cols=[ + FieldSchema(name="foo", type="string", comment=None), + FieldSchema(name="bar", type="int", comment=None), + FieldSchema(name="baz", type="boolean", comment=None), + ], + location="s3://", + inputFormat="org.apache.hadoop.mapred.FileInputFormat", + outputFormat="org.apache.hadoop.mapred.FileOutputFormat", + compressed=None, + numBuckets=None, + serdeInfo=SerDeInfo( + name=None, + serializationLib="org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", + parameters=None, + description=None, + serializerClass=None, + 
deserializerClass=None, + serdeType=None, + ), + bucketCols=None, + sortCols=None, + parameters=None, + skewedInfo=None, + storedAsSubDirectories=None, + ) + + +def test_construct_hive_storage_descriptor_nested(table_schema_nested: Schema) -> None: + descriptor = _construct_hive_storage_descriptor(table_schema_nested, "s3://") + assert descriptor == StorageDescriptor( + cols=[ + FieldSchema(name="foo", type="string", comment=None), + FieldSchema(name="bar", type="int", comment=None), + FieldSchema(name="baz", type="boolean", comment=None), + FieldSchema(name="qux", type="array", comment=None), + FieldSchema(name="quux", type="map>", comment=None), + FieldSchema(name="location", type="array>", comment=None), + FieldSchema(name="person", type="struct", comment=None), + ], + location="s3://", + inputFormat="org.apache.hadoop.mapred.FileInputFormat", + outputFormat="org.apache.hadoop.mapred.FileOutputFormat", + compressed=None, + numBuckets=None, + serdeInfo=SerDeInfo( + name=None, + serializationLib="org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", + parameters=None, + description=None, + serializerClass=None, + deserializerClass=None, + serdeType=None, + ), + bucketCols=None, + sortCols=None, + parameters=None, + skewedInfo=None, + storedAsSubDirectories=None, + ) + + +def test_resolve_table_location_warehouse(hive_database: HiveDatabase) -> None: + catalog = HiveCatalog(HIVE_CATALOG_NAME, warehouse="/tmp/warehouse/", uri=HIVE_METASTORE_FAKE_URL) + + # Set this one to None, so we'll fall back to the properties + hive_database.locationUri = None + + catalog._client = MagicMock() + catalog._client.__enter__().get_database.return_value = hive_database + + location = catalog._resolve_table_location(None, "database", "table") + assert location == "/tmp/warehouse/database.db/table" diff --git a/tests/catalog/test_rest.py b/tests/catalog/test_rest.py new file mode 100644 index 0000000000..1c7581d24a --- /dev/null +++ b/tests/catalog/test_rest.py @@ -0,0 +1,945 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
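+
+# The tests below stand up no real server: every HTTP interaction is declared
+# on a requests_mock Mocker before the catalog call is made. A minimal sketch
+# of the pattern (TEST_URI and TEST_TOKEN are module constants defined below):
+#
+#     rest_mock.get(
+#         f"{TEST_URI}v1/namespaces",
+#         json={"namespaces": [["default"]]},
+#         status_code=200,
+#     )
+#     assert RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).list_namespaces() == [("default",)]
+#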
+# pylint: disable=redefined-outer-name,unused-argument +from uuid import UUID + +import pytest +from requests_mock import Mocker + +import pyiceberg +from pyiceberg.catalog import PropertiesUpdateSummary, Table +from pyiceberg.catalog.rest import RestCatalog +from pyiceberg.exceptions import ( + NamespaceAlreadyExistsError, + NoSuchNamespaceError, + NoSuchTableError, + OAuthError, + TableAlreadyExistsError, +) +from pyiceberg.io import load_file_io +from pyiceberg.partitioning import PartitionField, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.table.metadata import TableMetadataV1 +from pyiceberg.table.refs import SnapshotRef, SnapshotRefType +from pyiceberg.table.snapshots import Operation, Snapshot, Summary +from pyiceberg.table.sorting import SortField, SortOrder +from pyiceberg.transforms import IdentityTransform, TruncateTransform +from pyiceberg.types import ( + BooleanType, + IntegerType, + NestedField, + StringType, +) + +TEST_URI = "https://iceberg-test-catalog/" +TEST_CREDENTIALS = "client:secret" +TEST_TOKEN = "some_jwt_token" +TEST_HEADERS = { + "Content-type": "application/json", + "X-Client-Version": "0.14.1", + "User-Agent": f"PyIceberg/{pyiceberg.__version__}", + "Authorization": f"Bearer {TEST_TOKEN}", +} +OAUTH_TEST_HEADERS = { + "Content-type": "application/x-www-form-urlencoded", +} + + +@pytest.fixture +def rest_mock(requests_mock: Mocker) -> Mocker: + """Takes the default requests_mock and adds the config endpoint to it + + This endpoint is called when initializing the rest catalog + """ + requests_mock.get( + f"{TEST_URI}v1/config", + json={"defaults": {}, "overrides": {}}, + status_code=200, + ) + return requests_mock + + +def test_no_uri_supplied() -> None: + with pytest.raises(KeyError): + RestCatalog("production") + + +def test_token_200(rest_mock: Mocker) -> None: + rest_mock.post( + f"{TEST_URI}v1/oauth/tokens", + json={ + "access_token": TEST_TOKEN, + "token_type": "Bearer", + "expires_in": 86400, + "issued_token_type": "urn:ietf:params:oauth:token-type:access_token", + }, + status_code=200, + request_headers=OAUTH_TEST_HEADERS, + ) + assert ( + RestCatalog("rest", uri=TEST_URI, credential=TEST_CREDENTIALS)._session.headers["Authorization"] # pylint: disable=W0212 + == f"Bearer {TEST_TOKEN}" + ) + + +def test_config_200(requests_mock: Mocker) -> None: + requests_mock.get( + f"{TEST_URI}v1/config", + json={"defaults": {}, "overrides": {}}, + status_code=200, + ) + requests_mock.post( + f"{TEST_URI}v1/oauth/tokens", + json={ + "access_token": TEST_TOKEN, + "token_type": "Bearer", + "expires_in": 86400, + "issued_token_type": "urn:ietf:params:oauth:token-type:access_token", + }, + status_code=200, + request_headers=OAUTH_TEST_HEADERS, + ) + RestCatalog("rest", uri=TEST_URI, credential=TEST_CREDENTIALS, warehouse="s3://some-bucket") + + assert requests_mock.called + assert requests_mock.call_count == 2 + + history = requests_mock.request_history + assert history[1].method == "GET" + assert history[1].url == "https://iceberg-test-catalog/v1/config?warehouse=s3%3A%2F%2Fsome-bucket" + + +def test_token_400(rest_mock: Mocker) -> None: + rest_mock.post( + f"{TEST_URI}v1/oauth/tokens", + json={"error": "invalid_client", "error_description": "Credentials for key invalid_key do not match"}, + status_code=400, + request_headers=OAUTH_TEST_HEADERS, + ) + + with pytest.raises(OAuthError) as e: + RestCatalog("rest", uri=TEST_URI, credential=TEST_CREDENTIALS) + assert str(e.value) == "invalid_client: Credentials for key invalid_key do not match" + + +def 
test_token_401(rest_mock: Mocker) -> None: + message = "invalid_client" + rest_mock.post( + f"{TEST_URI}v1/oauth/tokens", + json={"error": "invalid_client", "error_description": "Unknown or invalid client"}, + status_code=401, + request_headers=OAUTH_TEST_HEADERS, + ) + + with pytest.raises(OAuthError) as e: + RestCatalog("rest", uri=TEST_URI, credential=TEST_CREDENTIALS) + assert message in str(e.value) + + +def test_list_tables_200(rest_mock: Mocker) -> None: + namespace = "examples" + rest_mock.get( + f"{TEST_URI}v1/namespaces/{namespace}/tables", + json={"identifiers": [{"namespace": ["examples"], "name": "fooshare"}]}, + status_code=200, + request_headers=TEST_HEADERS, + ) + + assert RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).list_tables(namespace) == [("examples", "fooshare")] + + +def test_list_tables_200_sigv4(rest_mock: Mocker) -> None: + namespace = "examples" + rest_mock.get( + f"{TEST_URI}v1/namespaces/{namespace}/tables", + json={"identifiers": [{"namespace": ["examples"], "name": "fooshare"}]}, + status_code=200, + request_headers=TEST_HEADERS, + ) + + assert RestCatalog("rest", **{"uri": TEST_URI, "token": TEST_TOKEN, "rest.sigv4-enabled": "true"}).list_tables(namespace) == [ + ("examples", "fooshare") + ] + assert rest_mock.called + + +def test_list_tables_404(rest_mock: Mocker) -> None: + namespace = "examples" + rest_mock.get( + f"{TEST_URI}v1/namespaces/{namespace}/tables", + json={ + "error": { + "message": "Namespace does not exist: personal in warehouse 8bcb0838-50fc-472d-9ddb-8feb89ef5f1e", + "type": "NoSuchNamespaceException", + "code": 404, + } + }, + status_code=404, + request_headers=TEST_HEADERS, + ) + with pytest.raises(NoSuchNamespaceError) as e: + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).list_tables(namespace) + assert "Namespace does not exist" in str(e.value) + + +def test_list_namespaces_200(rest_mock: Mocker) -> None: + rest_mock.get( + f"{TEST_URI}v1/namespaces", + json={"namespaces": [["default"], ["examples"], ["fokko"], ["system"]]}, + status_code=200, + request_headers=TEST_HEADERS, + ) + assert RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).list_namespaces() == [ + ("default",), + ("examples",), + ("fokko",), + ("system",), + ] + + +def test_list_namespace_with_parent_200(rest_mock: Mocker) -> None: + rest_mock.get( + f"{TEST_URI}v1/namespaces?parent=accounting", + json={"namespaces": [["tax"]]}, + status_code=200, + request_headers=TEST_HEADERS, + ) + assert RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).list_namespaces(("accounting",)) == [ + ("accounting", "tax"), + ] + + +def test_create_namespace_200(rest_mock: Mocker) -> None: + namespace = "leden" + rest_mock.post( + f"{TEST_URI}v1/namespaces", + json={"namespace": [namespace], "properties": {}}, + status_code=200, + request_headers=TEST_HEADERS, + ) + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).create_namespace(namespace) + + +def test_create_namespace_409(rest_mock: Mocker) -> None: + namespace = "examples" + rest_mock.post( + f"{TEST_URI}v1/namespaces", + json={ + "error": { + "message": "Namespace already exists: fokko in warehouse 8bcb0838-50fc-472d-9ddb-8feb89ef5f1e", + "type": "AlreadyExistsException", + "code": 409, + } + }, + status_code=409, + request_headers=TEST_HEADERS, + ) + with pytest.raises(NamespaceAlreadyExistsError) as e: + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).create_namespace(namespace) + assert "Namespace already exists" in str(e.value) + + +def test_drop_namespace_404(rest_mock: Mocker) -> None: + namespace = 
"examples" + rest_mock.delete( + f"{TEST_URI}v1/namespaces/{namespace}", + json={ + "error": { + "message": "Namespace does not exist: leden in warehouse 8bcb0838-50fc-472d-9ddb-8feb89ef5f1e", + "type": "NoSuchNamespaceException", + "code": 404, + } + }, + status_code=404, + request_headers=TEST_HEADERS, + ) + with pytest.raises(NoSuchNamespaceError) as e: + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).drop_namespace(namespace) + assert "Namespace does not exist" in str(e.value) + + +def test_load_namespace_properties_200(rest_mock: Mocker) -> None: + namespace = "leden" + rest_mock.get( + f"{TEST_URI}v1/namespaces/{namespace}", + json={"namespace": ["fokko"], "properties": {"prop": "yes"}}, + status_code=204, + request_headers=TEST_HEADERS, + ) + assert RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).load_namespace_properties(namespace) == {"prop": "yes"} + + +def test_load_namespace_properties_404(rest_mock: Mocker) -> None: + namespace = "leden" + rest_mock.get( + f"{TEST_URI}v1/namespaces/{namespace}", + json={ + "error": { + "message": "Namespace does not exist: fokko22 in warehouse 8bcb0838-50fc-472d-9ddb-8feb89ef5f1e", + "type": "NoSuchNamespaceException", + "code": 404, + } + }, + status_code=404, + request_headers=TEST_HEADERS, + ) + with pytest.raises(NoSuchNamespaceError) as e: + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).load_namespace_properties(namespace) + assert "Namespace does not exist" in str(e.value) + + +def test_update_namespace_properties_200(rest_mock: Mocker) -> None: + rest_mock.post( + f"{TEST_URI}v1/namespaces/fokko/properties", + json={"removed": [], "updated": ["prop"], "missing": ["abc"]}, + status_code=200, + request_headers=TEST_HEADERS, + ) + response = RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).update_namespace_properties( + ("fokko",), {"abc"}, {"prop": "yes"} + ) + + assert response == PropertiesUpdateSummary(removed=[], updated=["prop"], missing=["abc"]) + + +def test_update_namespace_properties_404(rest_mock: Mocker) -> None: + rest_mock.post( + f"{TEST_URI}v1/namespaces/fokko/properties", + json={ + "error": { + "message": "Namespace does not exist: does_not_exists in warehouse 8bcb0838-50fc-472d-9ddb-8feb89ef5f1e", + "type": "NoSuchNamespaceException", + "code": 404, + } + }, + status_code=404, + request_headers=TEST_HEADERS, + ) + with pytest.raises(NoSuchNamespaceError) as e: + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).update_namespace_properties(("fokko",), {"abc"}, {"prop": "yes"}) + assert "Namespace does not exist" in str(e.value) + + +def test_load_table_200(rest_mock: Mocker) -> None: + rest_mock.get( + f"{TEST_URI}v1/namespaces/fokko/tables/table", + json={ + "metadata-location": "s3://warehouse/database/table/metadata/00001-5f2f8166-244c-4eae-ac36-384ecdec81fc.gz.metadata.json", + "metadata": { + "format-version": 1, + "table-uuid": "b55d9dda-6561-423a-8bfc-787980ce421f", + "location": "s3://warehouse/database/table", + "last-updated-ms": 1646787054459, + "last-column-id": 2, + "schema": { + "type": "struct", + "schema-id": 0, + "fields": [ + {"id": 1, "name": "id", "required": False, "type": "int"}, + {"id": 2, "name": "data", "required": False, "type": "string"}, + ], + }, + "current-schema-id": 0, + "schemas": [ + { + "type": "struct", + "schema-id": 0, + "fields": [ + {"id": 1, "name": "id", "required": False, "type": "int"}, + {"id": 2, "name": "data", "required": False, "type": "string"}, + ], + } + ], + "partition-spec": [], + "default-spec-id": 0, + "partition-specs": [{"spec-id": 0, "fields": 
[]}], + "last-partition-id": 999, + "default-sort-order-id": 0, + "sort-orders": [{"order-id": 0, "fields": []}], + "properties": {"owner": "bryan", "write.metadata.compression-codec": "gzip"}, + "current-snapshot-id": 3497810964824022504, + "refs": {"main": {"snapshot-id": 3497810964824022504, "type": "branch"}}, + "snapshots": [ + { + "snapshot-id": 3497810964824022504, + "timestamp-ms": 1646787054459, + "summary": { + "operation": "append", + "spark.app.id": "local-1646787004168", + "added-data-files": "1", + "added-records": "1", + "added-files-size": "697", + "changed-partition-count": "1", + "total-records": "1", + "total-files-size": "697", + "total-data-files": "1", + "total-delete-files": "0", + "total-position-deletes": "0", + "total-equality-deletes": "0", + }, + "manifest-list": "s3://warehouse/database/table/metadata/snap-3497810964824022504-1-c4f68204-666b-4e50-a9df-b10c34bf6b82.avro", + "schema-id": 0, + } + ], + "snapshot-log": [{"timestamp-ms": 1646787054459, "snapshot-id": 3497810964824022504}], + "metadata-log": [ + { + "timestamp-ms": 1646787031514, + "metadata-file": "s3://warehouse/database/table/metadata/00000-88484a1c-00e5-4a07-a787-c0e7aeffa805.gz.metadata.json", + } + ], + }, + "config": {"client.factory": "io.tabular.iceberg.catalog.TabularAwsClientFactory", "region": "us-west-2"}, + }, + status_code=200, + request_headers=TEST_HEADERS, + ) + catalog = RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN) + actual = catalog.load_table(("fokko", "table")) + expected = Table( + identifier=("rest", "fokko", "table"), + metadata_location="s3://warehouse/database/table/metadata/00001-5f2f8166-244c-4eae-ac36-384ecdec81fc.gz.metadata.json", + metadata=TableMetadataV1( + location="s3://warehouse/database/table", + table_uuid=UUID("b55d9dda-6561-423a-8bfc-787980ce421f"), + last_updated_ms=1646787054459, + last_column_id=2, + schemas=[ + Schema( + NestedField(field_id=1, name="id", field_type=IntegerType(), required=False), + NestedField(field_id=2, name="data", field_type=StringType(), required=False), + schema_id=0, + identifier_field_ids=[], + ) + ], + current_schema_id=0, + default_spec_id=0, + last_partition_id=999, + properties={"owner": "bryan", "write.metadata.compression-codec": "gzip"}, + current_snapshot_id=3497810964824022504, + snapshots=[ + Snapshot( + snapshot_id=3497810964824022504, + parent_snapshot_id=None, + sequence_number=None, + timestamp_ms=1646787054459, + manifest_list="s3://warehouse/database/table/metadata/snap-3497810964824022504-1-c4f68204-666b-4e50-a9df-b10c34bf6b82.avro", + summary=Summary( + operation=Operation.APPEND, + **{ + "spark.app.id": "local-1646787004168", + "added-data-files": "1", + "added-records": "1", + "added-files-size": "697", + "changed-partition-count": "1", + "total-records": "1", + "total-files-size": "697", + "total-data-files": "1", + "total-delete-files": "0", + "total-position-deletes": "0", + "total-equality-deletes": "0", + }, + ), + schema_id=0, + ) + ], + snapshot_log=[{"timestamp-ms": 1646787054459, "snapshot-id": 3497810964824022504}], + metadata_log=[ + { + "timestamp-ms": 1646787031514, + "metadata-file": "s3://warehouse/database/table/metadata/00000-88484a1c-00e5-4a07-a787-c0e7aeffa805.gz.metadata.json", + } + ], + sort_orders=[SortOrder(order_id=0)], + default_sort_order_id=0, + refs={ + "main": SnapshotRef( + snapshot_id=3497810964824022504, + snapshot_ref_type=SnapshotRefType.BRANCH, + min_snapshots_to_keep=None, + max_snapshot_age_ms=None, + max_ref_age_ms=None, + ) + }, + format_version=1, + 
schema_=Schema( + NestedField(field_id=1, name="id", field_type=IntegerType(), required=False), + NestedField(field_id=2, name="data", field_type=StringType(), required=False), + schema_id=0, + identifier_field_ids=[], + ), + partition_spec=[], + ), + io=load_file_io(), + catalog=catalog, + ) + # First compare the dicts + assert actual.metadata.model_dump() == expected.metadata.model_dump() + assert actual == expected + + +def test_load_table_404(rest_mock: Mocker) -> None: + rest_mock.get( + f"{TEST_URI}v1/namespaces/fokko/tables/does_not_exists", + json={ + "error": { + "message": "Table does not exist: examples.does_not_exists in warehouse 8bcb0838-50fc-472d-9ddb-8feb89ef5f1e", + "type": "NoSuchNamespaceErrorException", + "code": 404, + } + }, + status_code=404, + request_headers=TEST_HEADERS, + ) + + with pytest.raises(NoSuchTableError) as e: + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).load_table(("fokko", "does_not_exists")) + assert "Table does not exist" in str(e.value) + + +def test_drop_table_404(rest_mock: Mocker) -> None: + rest_mock.delete( + f"{TEST_URI}v1/namespaces/fokko/tables/does_not_exists", + json={ + "error": { + "message": "Table does not exist: fokko.does_not_exists in warehouse 8bcb0838-50fc-472d-9ddb-8feb89ef5f1e", + "type": "NoSuchNamespaceErrorException", + "code": 404, + } + }, + status_code=404, + request_headers=TEST_HEADERS, + ) + + with pytest.raises(NoSuchTableError) as e: + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).drop_table(("fokko", "does_not_exists")) + assert "Table does not exist" in str(e.value) + + +def test_create_table_200(rest_mock: Mocker, table_schema_simple: Schema) -> None: + rest_mock.post( + f"{TEST_URI}v1/namespaces/fokko/tables", + json={ + "metadata-location": "s3://warehouse/database/table/metadata.json", + "metadata": { + "format-version": 1, + "table-uuid": "bf289591-dcc0-4234-ad4f-5c3eed811a29", + "location": "s3://warehouse/database/table", + "last-updated-ms": 1657810967051, + "last-column-id": 3, + "schema": { + "type": "struct", + "schema-id": 0, + "identifier-field-ids": [2], + "fields": [ + {"id": 1, "name": "foo", "required": False, "type": "string"}, + {"id": 2, "name": "bar", "required": True, "type": "int"}, + {"id": 3, "name": "baz", "required": False, "type": "boolean"}, + ], + }, + "current-schema-id": 0, + "schemas": [ + { + "type": "struct", + "schema-id": 0, + "identifier-field-ids": [2], + "fields": [ + {"id": 1, "name": "foo", "required": False, "type": "string"}, + {"id": 2, "name": "bar", "required": True, "type": "int"}, + {"id": 3, "name": "baz", "required": False, "type": "boolean"}, + ], + } + ], + "partition-spec": [], + "default-spec-id": 0, + "last-partition-id": 999, + "default-sort-order-id": 0, + "sort-orders": [{"order-id": 0, "fields": []}], + "properties": { + "write.delete.parquet.compression-codec": "zstd", + "write.metadata.compression-codec": "gzip", + "write.summary.partition-limit": "100", + "write.parquet.compression-codec": "zstd", + }, + "current-snapshot-id": -1, + "refs": {}, + "snapshots": [], + "snapshot-log": [], + "metadata-log": [], + }, + "config": { + "client.factory": "io.tabular.iceberg.catalog.TabularAwsClientFactory", + "region": "us-west-2", + }, + }, + status_code=200, + request_headers=TEST_HEADERS, + ) + catalog = RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN) + actual = catalog.create_table( + identifier=("fokko", "fokko2"), + schema=table_schema_simple, + location=None, + partition_spec=PartitionSpec( + PartitionField(source_id=1, field_id=1000, 
transform=TruncateTransform(width=3), name="id"), spec_id=1 + ), + sort_order=SortOrder(SortField(source_id=2, transform=IdentityTransform())), + properties={"owner": "fokko"}, + ) + expected = Table( + identifier=("rest", "fokko", "fokko2"), + metadata_location="s3://warehouse/database/table/metadata.json", + metadata=TableMetadataV1( + location="s3://warehouse/database/table", + table_uuid=UUID("bf289591-dcc0-4234-ad4f-5c3eed811a29"), + last_updated_ms=1657810967051, + last_column_id=3, + schemas=[ + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + schema_id=0, + identifier_field_ids=[2], + ) + ], + current_schema_id=0, + default_spec_id=0, + last_partition_id=999, + properties={ + "write.delete.parquet.compression-codec": "zstd", + "write.metadata.compression-codec": "gzip", + "write.summary.partition-limit": "100", + "write.parquet.compression-codec": "zstd", + }, + current_snapshot_id=None, + snapshots=[], + snapshot_log=[], + metadata_log=[], + sort_orders=[SortOrder(order_id=0)], + default_sort_order_id=0, + refs={}, + format_version=1, + schema_=Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + schema_id=0, + identifier_field_ids=[2], + ), + partition_spec=[], + ), + io=load_file_io(), + catalog=catalog, + ) + assert actual == expected + + +def test_create_table_409(rest_mock: Mocker, table_schema_simple: Schema) -> None: + rest_mock.post( + f"{TEST_URI}v1/namespaces/fokko/tables", + json={ + "error": { + "message": "Table already exists: fokko.already_exists in warehouse 8bcb0838-50fc-472d-9ddb-8feb89ef5f1e", + "type": "AlreadyExistsException", + "code": 409, + } + }, + status_code=409, + request_headers=TEST_HEADERS, + ) + + with pytest.raises(TableAlreadyExistsError) as e: + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).create_table( + identifier=("fokko", "fokko2"), + schema=table_schema_simple, + location=None, + partition_spec=PartitionSpec( + PartitionField(source_id=1, field_id=1000, transform=TruncateTransform(width=3), name="id") + ), + sort_order=SortOrder(SortField(source_id=2, transform=IdentityTransform())), + properties={"owner": "fokko"}, + ) + assert "Table already exists" in str(e.value) + + +def test_register_table_200(rest_mock: Mocker, table_schema_simple: Schema) -> None: + rest_mock.post( + f"{TEST_URI}v1/namespaces/default/register", + json={ + "metadata-location": "s3://warehouse/database/table/metadata.json", + "metadata": { + "format-version": 1, + "table-uuid": "bf289591-dcc0-4234-ad4f-5c3eed811a29", + "location": "s3://warehouse/database/table", + "last-updated-ms": 1657810967051, + "last-column-id": 3, + "schema": { + "type": "struct", + "schema-id": 0, + "identifier-field-ids": [2], + "fields": [ + {"id": 1, "name": "foo", "required": False, "type": "string"}, + {"id": 2, "name": "bar", "required": True, "type": "int"}, + {"id": 3, "name": "baz", "required": False, "type": "boolean"}, + ], + }, + "current-schema-id": 0, + "schemas": [ + { + "type": "struct", + "schema-id": 0, + "identifier-field-ids": [2], + "fields": [ + {"id": 1, "name": "foo", "required": False, "type": "string"}, + {"id": 2, "name": "bar", "required": True, "type": "int"}, + 
{"id": 3, "name": "baz", "required": False, "type": "boolean"}, + ], + } + ], + "partition-spec": [], + "default-spec-id": 0, + "last-partition-id": 999, + "default-sort-order-id": 0, + "sort-orders": [{"order-id": 0, "fields": []}], + "properties": { + "write.delete.parquet.compression-codec": "zstd", + "write.metadata.compression-codec": "gzip", + "write.summary.partition-limit": "100", + "write.parquet.compression-codec": "zstd", + }, + "current-snapshot-id": -1, + "refs": {}, + "snapshots": [], + "snapshot-log": [], + "metadata-log": [], + }, + "config": { + "client.factory": "io.tabular.iceberg.catalog.TabularAwsClientFactory", + "region": "us-west-2", + }, + }, + status_code=200, + request_headers=TEST_HEADERS, + ) + catalog = RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN) + actual = catalog.register_table( + identifier=("default", "registered_table"), metadata_location="s3://warehouse/database/table/metadata.json" + ) + expected = Table( + identifier=("rest", "default", "registered_table"), + metadata_location="s3://warehouse/database/table/metadata.json", + metadata=TableMetadataV1( + location="s3://warehouse/database/table", + table_uuid=UUID("bf289591-dcc0-4234-ad4f-5c3eed811a29"), + last_updated_ms=1657810967051, + last_column_id=3, + schemas=[ + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + schema_id=0, + identifier_field_ids=[2], + ) + ], + current_schema_id=0, + default_spec_id=0, + last_partition_id=999, + properties={ + "write.delete.parquet.compression-codec": "zstd", + "write.metadata.compression-codec": "gzip", + "write.summary.partition-limit": "100", + "write.parquet.compression-codec": "zstd", + }, + current_snapshot_id=None, + snapshots=[], + snapshot_log=[], + metadata_log=[], + sort_orders=[SortOrder(order_id=0)], + default_sort_order_id=0, + refs={}, + format_version=1, + schema_=Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + schema_id=0, + identifier_field_ids=[2], + ), + partition_spec=[], + ), + io=load_file_io(), + catalog=catalog, + ) + assert actual.metadata.model_dump() == expected.metadata.model_dump() + assert actual.metadata_location == expected.metadata_location + assert actual.identifier == expected.identifier + + +def test_register_table_409(rest_mock: Mocker, table_schema_simple: Schema) -> None: + rest_mock.post( + f"{TEST_URI}v1/namespaces/default/register", + json={ + "error": { + "message": "Table already exists: fokko.fokko2 in warehouse 8bcb0838-50fc-472d-9ddb-8feb89ef5f1e", + "type": "AlreadyExistsException", + "code": 409, + } + }, + status_code=409, + request_headers=TEST_HEADERS, + ) + + catalog = RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN) + with pytest.raises(TableAlreadyExistsError) as e: + catalog.register_table( + identifier=("default", "registered_table"), metadata_location="s3://warehouse/database/table/metadata.json" + ) + assert "Table already exists" in str(e.value) + + +def test_delete_namespace_204(rest_mock: Mocker) -> None: + namespace = "example" + rest_mock.delete( + f"{TEST_URI}v1/namespaces/{namespace}", + json={}, + status_code=204, + request_headers=TEST_HEADERS, + ) + RestCatalog("rest", uri=TEST_URI, 
token=TEST_TOKEN).drop_namespace(namespace) + + +def test_delete_table_204(rest_mock: Mocker) -> None: + rest_mock.delete( + f"{TEST_URI}v1/namespaces/example/tables/fokko", + json={}, + status_code=204, + request_headers=TEST_HEADERS, + ) + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).drop_table(("example", "fokko")) + + +def test_delete_table_404(rest_mock: Mocker) -> None: + rest_mock.delete( + f"{TEST_URI}v1/namespaces/example/tables/fokko", + json={ + "error": { + "message": "Table does not exist: fokko.fokko2 in warehouse 8bcb0838-50fc-472d-9ddb-8feb89ef5f1e", + "type": "NoSuchTableException", + "code": 404, + } + }, + status_code=404, + request_headers=TEST_HEADERS, + ) + with pytest.raises(NoSuchTableError) as e: + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).drop_table(("example", "fokko")) + assert "Table does not exist" in str(e.value) + + +def test_create_table_missing_namespace(rest_mock: Mocker, table_schema_simple: Schema) -> None: + table = "table" + with pytest.raises(NoSuchTableError) as e: + # Missing namespace + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).create_table(table, table_schema_simple) + assert f"Missing namespace or invalid identifier: {table}" in str(e.value) + + +def test_load_table_invalid_namespace(rest_mock: Mocker) -> None: + table = "table" + with pytest.raises(NoSuchTableError) as e: + # Missing namespace + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).load_table(table) + assert f"Missing namespace or invalid identifier: {table}" in str(e.value) + + +def test_drop_table_invalid_namespace(rest_mock: Mocker) -> None: + table = "table" + with pytest.raises(NoSuchTableError) as e: + # Missing namespace + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).drop_table(table) + assert f"Missing namespace or invalid identifier: {table}" in str(e.value) + + +def test_purge_table_invalid_namespace(rest_mock: Mocker) -> None: + table = "table" + with pytest.raises(NoSuchTableError) as e: + # Missing namespace + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).purge_table(table) + assert f"Missing namespace or invalid identifier: {table}" in str(e.value) + + +def test_create_namespace_invalid_namespace(rest_mock: Mocker) -> None: + with pytest.raises(NoSuchNamespaceError) as e: + # Empty namespace + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).create_namespace(()) + assert "Empty namespace identifier" in str(e.value) + + +def test_drop_namespace_invalid_namespace(rest_mock: Mocker) -> None: + with pytest.raises(NoSuchNamespaceError) as e: + # Empty namespace + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).drop_namespace(()) + assert "Empty namespace identifier" in str(e.value) + + +def test_load_namespace_properties_invalid_namespace(rest_mock: Mocker) -> None: + with pytest.raises(NoSuchNamespaceError) as e: + # Empty namespace + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).load_namespace_properties(()) + assert "Empty namespace identifier" in str(e.value) + + +def test_update_namespace_properties_invalid_namespace(rest_mock: Mocker) -> None: + with pytest.raises(NoSuchNamespaceError) as e: + # Empty namespace + RestCatalog("rest", uri=TEST_URI, token=TEST_TOKEN).update_namespace_properties(()) + assert "Empty namespace identifier" in str(e.value) + + +def test_request_session_with_ssl_ca_bundle() -> None: + # Given + catalog_properties = { + "uri": TEST_URI, + "token": TEST_TOKEN, + "ssl": { + "cabundle": "path_to_ca_bundle", + }, + } + with pytest.raises(OSError) as e: + # Invalid CA bundle path + 
RestCatalog("rest", **catalog_properties) # type: ignore + assert "Could not find a suitable TLS CA certificate bundle, invalid path: path_to_ca_bundle" in str(e.value) + + +def test_request_session_with_ssl_client_cert() -> None: + # Given + catalog_properties = { + "uri": TEST_URI, + "token": TEST_TOKEN, + "ssl": { + "client": { + "cert": "path_to_client_cert", + "key": "path_to_client_key", + } + }, + } + with pytest.raises(OSError) as e: + # Missing namespace + RestCatalog("rest", **catalog_properties) # type: ignore + assert "Could not find the TLS certificate file, invalid path: path_to_client_cert" in str(e.value) diff --git a/tests/catalog/test_sql.py b/tests/catalog/test_sql.py new file mode 100644 index 0000000000..41bb5c76b8 --- /dev/null +++ b/tests/catalog/test_sql.py @@ -0,0 +1,384 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os +from pathlib import Path +from typing import Generator, List + +import pytest +from pytest import TempPathFactory +from sqlalchemy.exc import ArgumentError, IntegrityError + +from pyiceberg.catalog import Identifier +from pyiceberg.catalog.sql import SqlCatalog +from pyiceberg.exceptions import ( + NamespaceAlreadyExistsError, + NamespaceNotEmptyError, + NoSuchNamespaceError, + NoSuchPropertyException, + NoSuchTableError, + TableAlreadyExistsError, +) +from pyiceberg.schema import Schema +from pyiceberg.table.sorting import ( + NullOrder, + SortDirection, + SortField, + SortOrder, +) +from pyiceberg.transforms import IdentityTransform + + +@pytest.fixture(name="warehouse", scope="session") +def fixture_warehouse(tmp_path_factory: TempPathFactory) -> Path: + return tmp_path_factory.mktemp("test_sql") + + +@pytest.fixture(name="random_identifier") +def fixture_random_identifier(warehouse: Path, database_name: str, table_name: str) -> Identifier: + os.makedirs(f"{warehouse}/{database_name}.db/{table_name}/metadata/", exist_ok=True) + return database_name, table_name + + +@pytest.fixture(name="another_random_identifier") +def fixture_another_random_identifier(warehouse: Path, database_name: str, table_name: str) -> Identifier: + database_name = database_name + "_new" + table_name = table_name + "_new" + os.makedirs(f"{warehouse}/{database_name}.db/{table_name}/metadata/", exist_ok=True) + return database_name, table_name + + +@pytest.fixture(name="test_catalog", scope="module") +def fixture_test_catalog(warehouse: Path) -> Generator[SqlCatalog, None, None]: + props = { + "uri": "sqlite+pysqlite:///:memory:", + "warehouse": f"file://{warehouse}", + } + test_catalog = SqlCatalog("test_sql_catalog", **props) + test_catalog.create_tables() + yield test_catalog + test_catalog.destroy_tables() + + +def test_creation_with_no_uri() -> None: + with pytest.raises(NoSuchPropertyException): + 
SqlCatalog("test_ddb_catalog", not_uri="unused") + + +def test_creation_with_unsupported_uri() -> None: + with pytest.raises(ArgumentError): + SqlCatalog("test_ddb_catalog", uri="unsupported:xxx") + + +def test_create_tables_idempotency(test_catalog: SqlCatalog) -> None: + # Second initialization should not fail even if tables are already created + test_catalog.create_tables() + test_catalog.create_tables() + + +def test_create_table_default_sort_order( + test_catalog: SqlCatalog, table_schema_nested: Schema, random_identifier: Identifier +) -> None: + database_name, _table_name = random_identifier + test_catalog.create_namespace(database_name) + table = test_catalog.create_table(random_identifier, table_schema_nested) + assert table.sort_order().order_id == 0, "Order ID must match" + assert table.sort_order().is_unsorted is True, "Order must be unsorted" + test_catalog.drop_table(random_identifier) + + +def test_create_table_custom_sort_order( + test_catalog: SqlCatalog, table_schema_nested: Schema, random_identifier: Identifier +) -> None: + database_name, _table_name = random_identifier + test_catalog.create_namespace(database_name) + order = SortOrder(SortField(source_id=2, transform=IdentityTransform(), null_order=NullOrder.NULLS_FIRST)) + table = test_catalog.create_table(random_identifier, table_schema_nested, sort_order=order) + given_sort_order = table.sort_order() + assert given_sort_order.order_id == 1, "Order ID must match" + assert len(given_sort_order.fields) == 1, "Order must have 1 field" + assert given_sort_order.fields[0].direction == SortDirection.ASC, "Direction must match" + assert given_sort_order.fields[0].null_order == NullOrder.NULLS_FIRST, "Null order must match" + assert isinstance(given_sort_order.fields[0].transform, IdentityTransform), "Transform must match" + test_catalog.drop_table(random_identifier) + + +def test_create_table_with_default_warehouse_location( + warehouse: Path, test_catalog: SqlCatalog, table_schema_nested: Schema, random_identifier: Identifier +) -> None: + database_name, _table_name = random_identifier + test_catalog.create_namespace(database_name) + test_catalog.create_table(random_identifier, table_schema_nested) + table = test_catalog.load_table(random_identifier) + assert table.identifier == (test_catalog.name,) + random_identifier + assert table.metadata_location.startswith(f"file://{warehouse}") + assert os.path.exists(table.metadata_location[len("file://") :]) + test_catalog.drop_table(random_identifier) + + +def test_create_duplicated_table(test_catalog: SqlCatalog, table_schema_nested: Schema, random_identifier: Identifier) -> None: + database_name, _table_name = random_identifier + test_catalog.create_namespace(database_name) + test_catalog.create_table(random_identifier, table_schema_nested) + with pytest.raises(TableAlreadyExistsError): + test_catalog.create_table(random_identifier, table_schema_nested) + + +def test_create_table_with_non_existing_namespace(test_catalog: SqlCatalog, table_schema_nested: Schema, table_name: str) -> None: + identifier = ("invalid", table_name) + with pytest.raises(NoSuchNamespaceError): + test_catalog.create_table(identifier, table_schema_nested) + + +def test_create_table_without_namespace(test_catalog: SqlCatalog, table_schema_nested: Schema, table_name: str) -> None: + with pytest.raises(ValueError): + test_catalog.create_table(table_name, table_schema_nested) + + +def test_register_table(test_catalog: SqlCatalog, random_identifier: Identifier, metadata_location: str) -> None: + database_name, 
_table_name = random_identifier + test_catalog.create_namespace(database_name) + table = test_catalog.register_table(random_identifier, metadata_location) + assert table.identifier == (test_catalog.name,) + random_identifier + assert table.metadata_location == metadata_location + assert os.path.exists(metadata_location) + test_catalog.drop_table(random_identifier) + + +def test_register_existing_table(test_catalog: SqlCatalog, random_identifier: Identifier, metadata_location: str) -> None: + database_name, _table_name = random_identifier + test_catalog.create_namespace(database_name) + test_catalog.register_table(random_identifier, metadata_location) + with pytest.raises(TableAlreadyExistsError): + test_catalog.register_table(random_identifier, metadata_location) + + +def test_register_table_with_non_existing_namespace(test_catalog: SqlCatalog, metadata_location: str, table_name: str) -> None: + identifier = ("invalid", table_name) + with pytest.raises(NoSuchNamespaceError): + test_catalog.register_table(identifier, metadata_location) + + +def test_register_table_without_namespace(test_catalog: SqlCatalog, metadata_location: str, table_name: str) -> None: + with pytest.raises(ValueError): + test_catalog.register_table(table_name, metadata_location) + + +def test_load_table(test_catalog: SqlCatalog, table_schema_nested: Schema, random_identifier: Identifier) -> None: + database_name, _table_name = random_identifier + test_catalog.create_namespace(database_name) + table = test_catalog.create_table(random_identifier, table_schema_nested) + loaded_table = test_catalog.load_table(random_identifier) + assert table.identifier == loaded_table.identifier + assert table.metadata_location == loaded_table.metadata_location + assert table.metadata == loaded_table.metadata + + +def test_drop_table(test_catalog: SqlCatalog, table_schema_nested: Schema, random_identifier: Identifier) -> None: + database_name, _table_name = random_identifier + test_catalog.create_namespace(database_name) + table = test_catalog.create_table(random_identifier, table_schema_nested) + assert table.identifier == (test_catalog.name,) + random_identifier + test_catalog.drop_table(random_identifier) + with pytest.raises(NoSuchTableError): + test_catalog.load_table(random_identifier) + + +def test_drop_table_that_does_not_exist(test_catalog: SqlCatalog, random_identifier: Identifier) -> None: + with pytest.raises(NoSuchTableError): + test_catalog.drop_table(random_identifier) + + +def test_rename_table( + test_catalog: SqlCatalog, table_schema_nested: Schema, random_identifier: Identifier, another_random_identifier: Identifier +) -> None: + from_database_name, _from_table_name = random_identifier + to_database_name, _to_table_name = another_random_identifier + test_catalog.create_namespace(from_database_name) + test_catalog.create_namespace(to_database_name) + table = test_catalog.create_table(random_identifier, table_schema_nested) + assert table.identifier == (test_catalog.name,) + random_identifier + test_catalog.rename_table(random_identifier, another_random_identifier) + new_table = test_catalog.load_table(another_random_identifier) + assert new_table.identifier == (test_catalog.name,) + another_random_identifier + assert new_table.metadata_location == table.metadata_location + with pytest.raises(NoSuchTableError): + test_catalog.load_table(random_identifier) + + +def test_rename_table_to_existing_one( + test_catalog: SqlCatalog, table_schema_nested: Schema, random_identifier: Identifier, another_random_identifier: Identifier 
+) -> None: + from_database_name, _from_table_name = random_identifier + to_database_name, _to_table_name = another_random_identifier + test_catalog.create_namespace(from_database_name) + test_catalog.create_namespace(to_database_name) + table = test_catalog.create_table(random_identifier, table_schema_nested) + assert table.identifier == (test_catalog.name,) + random_identifier + new_table = test_catalog.create_table(another_random_identifier, table_schema_nested) + assert new_table.identifier == (test_catalog.name,) + another_random_identifier + with pytest.raises(TableAlreadyExistsError): + test_catalog.rename_table(random_identifier, another_random_identifier) + + +def test_rename_missing_table( + test_catalog: SqlCatalog, random_identifier: Identifier, another_random_identifier: Identifier +) -> None: + to_database_name, _to_table_name = another_random_identifier + test_catalog.create_namespace(to_database_name) + with pytest.raises(NoSuchTableError): + test_catalog.rename_table(random_identifier, another_random_identifier) + + +def test_rename_table_to_missing_namespace( + test_catalog: SqlCatalog, table_schema_nested: Schema, random_identifier: Identifier, another_random_identifier: Identifier +) -> None: + from_database_name, _from_table_name = random_identifier + test_catalog.create_namespace(from_database_name) + table = test_catalog.create_table(random_identifier, table_schema_nested) + assert table.identifier == (test_catalog.name,) + random_identifier + with pytest.raises(NoSuchNamespaceError): + test_catalog.rename_table(random_identifier, another_random_identifier) + + +def test_list_tables( + test_catalog: SqlCatalog, table_schema_nested: Schema, random_identifier: Identifier, another_random_identifier: Identifier +) -> None: + database_name_1, _table_name_1 = random_identifier + database_name_2, _table_name_2 = another_random_identifier + test_catalog.create_namespace(database_name_1) + test_catalog.create_namespace(database_name_2) + test_catalog.create_table(random_identifier, table_schema_nested) + test_catalog.create_table(another_random_identifier, table_schema_nested) + identifier_list = test_catalog.list_tables(database_name_1) + assert len(identifier_list) == 1 + assert random_identifier in identifier_list + + identifier_list = test_catalog.list_tables(database_name_2) + assert len(identifier_list) == 1 + assert another_random_identifier in identifier_list + + +def test_create_namespace(test_catalog: SqlCatalog, database_name: str) -> None: + test_catalog.create_namespace(database_name) + assert (database_name,) in test_catalog.list_namespaces() + + +def test_create_duplicate_namespace(test_catalog: SqlCatalog, database_name: str) -> None: + test_catalog.create_namespace(database_name) + with pytest.raises(NamespaceAlreadyExistsError): + test_catalog.create_namespace(database_name) + + +def test_create_namespaces_sharing_same_prefix(test_catalog: SqlCatalog, database_name: str) -> None: + test_catalog.create_namespace(database_name + "_1") + # Second namespace is a prefix of the first one, make sure it can be added. 
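+ # (A naive prefix match on namespace names, e.g. LIKE 'name%', would wrongly treat the two as overlapping.)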
+ test_catalog.create_namespace(database_name) + + +def test_create_namespace_with_comment_and_location(test_catalog: SqlCatalog, database_name: str) -> None: + test_location = "/test/location" + test_properties = { + "comment": "this is a test description", + "location": test_location, + } + test_catalog.create_namespace(namespace=database_name, properties=test_properties) + loaded_database_list = test_catalog.list_namespaces() + assert (database_name,) in loaded_database_list + properties = test_catalog.load_namespace_properties(database_name) + assert properties["comment"] == "this is a test description" + assert properties["location"] == test_location + + +def test_create_namespace_with_null_properties(test_catalog: SqlCatalog, database_name: str) -> None: + with pytest.raises(IntegrityError): + test_catalog.create_namespace(namespace=database_name, properties={None: "value"}) # type: ignore + + with pytest.raises(IntegrityError): + test_catalog.create_namespace(namespace=database_name, properties={"key": None}) # type: ignore + + +def test_list_namespaces(test_catalog: SqlCatalog, database_list: List[str]) -> None: + for database_name in database_list: + test_catalog.create_namespace(database_name) + db_list = test_catalog.list_namespaces() + for database_name in database_list: + assert (database_name,) in db_list + assert len(test_catalog.list_namespaces(database_name)) == 1 + + +def test_list_non_existing_namespaces(test_catalog: SqlCatalog) -> None: + with pytest.raises(NoSuchNamespaceError): + test_catalog.list_namespaces("does_not_exist") + + +def test_drop_namespace(test_catalog: SqlCatalog, table_schema_nested: Schema, random_identifier: Identifier) -> None: + database_name, table_name = random_identifier + test_catalog.create_namespace(database_name) + assert (database_name,) in test_catalog.list_namespaces() + test_catalog.create_table((database_name, table_name), table_schema_nested) + with pytest.raises(NamespaceNotEmptyError): + test_catalog.drop_namespace(database_name) + test_catalog.drop_table((database_name, table_name)) + test_catalog.drop_namespace(database_name) + assert (database_name,) not in test_catalog.list_namespaces() + + +def test_load_namespace_properties(test_catalog: SqlCatalog, database_name: str) -> None: + warehouse_location = "/test/location" + test_properties = { + "comment": "this is a test description", + "location": f"{warehouse_location}/{database_name}.db", + "test_property1": "1", + "test_property2": "2", + "test_property3": "3", + } + + test_catalog.create_namespace(database_name, test_properties) + listed_properties = test_catalog.load_namespace_properties(database_name) + for k, v in listed_properties.items(): + assert k in test_properties + assert v == test_properties[k] + + +def test_load_empty_namespace_properties(test_catalog: SqlCatalog, database_name: str) -> None: + test_catalog.create_namespace(database_name) + listed_properties = test_catalog.load_namespace_properties(database_name) + assert listed_properties == {"exists": "true"} + + +def test_update_namespace_properties(test_catalog: SqlCatalog, database_name: str) -> None: + warehouse_location = "/test/location" + test_properties = { + "comment": "this is a test description", + "location": f"{warehouse_location}/{database_name}.db", + "test_property1": "1", + "test_property2": "2", + "test_property3": "3", + } + removals = {"test_property1", "test_property2", "test_property3", "should_not_removed"} + updates = {"test_property4": "4", "test_property5": "5", "comment": "updated 
test description"} + test_catalog.create_namespace(database_name, test_properties) + update_report = test_catalog.update_namespace_properties(database_name, removals, updates) + for k in updates.keys(): + assert k in update_report.updated + for k in removals: + if k == "should_not_removed": + assert k in update_report.missing + else: + assert k in update_report.removed + assert "updated test description" == test_catalog.load_namespace_properties(database_name)["comment"] diff --git a/tests/cli/test_console.py b/tests/cli/test_console.py new file mode 100644 index 0000000000..45eb4dd1be --- /dev/null +++ b/tests/cli/test_console.py @@ -0,0 +1,930 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +import os + +import pytest +from click.testing import CliRunner +from pytest_mock import MockFixture + +from pyiceberg.cli.console import run +from pyiceberg.partitioning import PartitionField, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.transforms import IdentityTransform +from pyiceberg.typedef import Properties +from pyiceberg.types import LongType, NestedField +from pyiceberg.utils.config import Config +from tests.catalog.test_base import InMemoryCatalog + + +def test_missing_uri(mocker: MockFixture, empty_home_dir_path: str) -> None: + # mock to prevent parsing ~/.pyiceberg.yaml or {PYICEBERG_HOME}/.pyiceberg.yaml + mocker.patch.dict(os.environ, values={"HOME": empty_home_dir_path, "PYICEBERG_HOME": empty_home_dir_path}) + mocker.patch("pyiceberg.catalog._ENV_CONFIG", return_value=Config()) + + runner = CliRunner() + result = runner.invoke(run, ["list"]) + + assert result.exit_code == 1 + assert result.output == "Could not initialize catalog with the following properties: {}\n" + + +@pytest.fixture(autouse=True) +def env_vars(mocker: MockFixture) -> None: + mocker.patch.dict(os.environ, MOCK_ENVIRONMENT) + + +@pytest.fixture(name="catalog") +def fixture_catalog(mocker: MockFixture) -> InMemoryCatalog: + in_memory_catalog = InMemoryCatalog("test.in.memory.catalog", **{"test.key": "test.value"}) + mocker.patch("pyiceberg.cli.console.load_catalog", return_value=in_memory_catalog) + return in_memory_catalog + + +@pytest.fixture(name="namespace_properties") +def fixture_namespace_properties() -> Properties: + return TEST_NAMESPACE_PROPERTIES.copy() + + +TEST_TABLE_IDENTIFIER = ("default", "my_table") +TEST_TABLE_NAMESPACE = "default" +TEST_NAMESPACE_PROPERTIES = {"location": "s3://warehouse/database/location"} +TEST_TABLE_NAME = "my_table" +TEST_TABLE_SCHEMA = Schema( + NestedField(1, "x", LongType()), + NestedField(2, "y", LongType(), doc="comment"), + NestedField(3, "z", LongType()), +) +TEST_TABLE_LOCATION = "s3://bucket/test/location" +TEST_TABLE_PARTITION_SPEC = PartitionSpec(PartitionField(name="x", transform=IdentityTransform(), 
source_id=1, field_id=1000)) +TEST_TABLE_PROPERTIES = {"read.split.target.size": "134217728"} +MOCK_ENVIRONMENT = {"PYICEBERG_CATALOG__PRODUCTION__URI": "test://doesnotexist"} + + +def test_list_root(catalog: InMemoryCatalog) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE) + + runner = CliRunner() + result = runner.invoke(run, ["list"]) + + assert result.exit_code == 0 + assert TEST_TABLE_NAMESPACE in result.output + + +def test_list_namespace(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + + runner = CliRunner() + result = runner.invoke(run, ["list", "default"]) + + assert result.exit_code == 0 + assert result.output == "default.my_table\n" + + +def test_describe_namespace(catalog: InMemoryCatalog, namespace_properties: Properties) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE, namespace_properties) + + runner = CliRunner() + result = runner.invoke(run, ["describe", "default"]) + + assert result.exit_code == 0 + assert result.output == "location s3://warehouse/database/location\n" + + +def test_describe_namespace_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["describe", "doesnotexist"]) + + assert result.exit_code == 1 + assert result.output == "Namespace does not exist: ('doesnotexist',)\n" + + +def test_describe_table(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["describe", "default.my_table"]) + assert result.exit_code == 0 + assert ( + # Strip the whitespace on the end + "\n".join([line.rstrip() for line in result.output.split("\n")]) + == """Table format version 1 +Metadata location s3://warehouse/default/my_table/metadata/metadata.json +Table UUID d20125c8-7284-442c-9aea-15fee620737c +Last Updated 1602638573874 +Partition spec [ + 1000: x: identity(1) + ] +Sort order [] +Current schema Schema, id=0 + ├── 1: x: required long + ├── 2: y: required long (comment) + └── 3: z: required long +Current snapshot None +Snapshots Snapshots + └── Snapshot 1925, schema None +Properties +""" + ) + + +def test_describe_table_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["describe", "default.doesnotexist"]) + assert result.exit_code == 1 + assert result.output == "Table or namespace does not exist: default.doesnotexist\n" + + +def test_schema(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["schema", "default.my_table"]) + assert result.exit_code == 0 + assert ( + "\n".join([line.rstrip() for line in result.output.split("\n")]) + == """x long +y long comment +z long +""" + ) + + +def test_schema_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["schema", "default.doesnotexist"]) + assert result.exit_code == 1 + assert result.output == "Table does not exist: ('default', 'doesnotexist')\n" + + 
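# CliRunner.invoke runs the CLI in-process, catching SystemExit and capturing stdout, so both success and failure paths can be asserted on exit_code and output without spawning a subprocess. +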
+def test_spec(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["spec", "default.my_table"]) + assert result.exit_code == 0 + assert ( + result.output + == """[ + 1000: x: identity(1) +] +""" + ) + + +def test_spec_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["spec", "default.doesnotexist"]) + assert result.exit_code == 1 + assert result.output == "Table does not exist: ('default', 'doesnotexist')\n" + + +def test_uuid(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["uuid", "default.my_table"]) + assert result.exit_code == 0 + assert result.output == """d20125c8-7284-442c-9aea-15fee620737c\n""" + + +def test_uuid_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["uuid", "default.doesnotexist"]) + assert result.exit_code == 1 + assert result.output == "Table does not exist: ('default', 'doesnotexist')\n" + + +def test_location(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["location", "default.my_table"]) + assert result.exit_code == 0 + assert result.output == """s3://bucket/test/location\n""" + + +def test_location_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["location", "default.doesnotexist"]) + assert result.exit_code == 1 + assert result.output == "Table does not exist: ('default', 'doesnotexist')\n" + + +def test_drop_table(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["drop", "table", "default.my_table"]) + assert result.exit_code == 0 + assert result.output == """Dropped table: default.my_table\n""" + + +def test_drop_table_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["drop", "table", "default.doesnotexist"]) + assert result.exit_code == 1 + assert result.output == "Table does not exist: ('default', 'doesnotexist')\n" + + +def test_drop_namespace(catalog: InMemoryCatalog) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE) + + runner = CliRunner() + result = runner.invoke(run, ["drop", "namespace", "default"]) + assert result.exit_code == 0 + assert result.output == """Dropped namespace: default\n""" + + +def test_drop_namespace_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["drop", "namespace", "doesnotexist"]) + assert result.exit_code == 1 + assert result.output == "Namespace does not exist: ('doesnotexist',)\n" + + +def test_rename_table(catalog: InMemoryCatalog) -> 
None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["rename", "default.my_table", "default.my_new_table"]) + assert result.exit_code == 0 + assert result.output == """Renamed table from default.my_table to default.my_new_table\n""" + + +def test_rename_table_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["rename", "default.doesnotexist", "default.bar"]) + assert result.exit_code == 1 + assert result.output == "Table does not exist: ('default', 'doesnotexist')\n" + + +def test_properties_get_table(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + + runner = CliRunner() + result = runner.invoke(run, ["properties", "get", "table", "default.my_table"]) + assert result.exit_code == 0 + assert result.output == "read.split.target.size 134217728\n" + + +def test_properties_get_table_specific_property(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + + runner = CliRunner() + result = runner.invoke(run, ["properties", "get", "table", "default.my_table", "read.split.target.size"]) + assert result.exit_code == 0 + assert result.output == "134217728\n" + + +def test_properties_get_table_specific_property_that_doesnt_exist(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + + runner = CliRunner() + result = runner.invoke(run, ["properties", "get", "table", "default.my_table", "doesnotexist"]) + assert result.exit_code == 1 + assert result.output == "Could not find property doesnotexist on table default.my_table\n" + + +def test_properties_get_table_does_not_exist(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["properties", "get", "table", "doesnotexist"]) + assert result.exit_code == 1 + assert result.output == "Table does not exist: ('doesnotexist',)\n" + + +def test_properties_get_namespace(catalog: InMemoryCatalog, namespace_properties: Properties) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE, namespace_properties) + + runner = CliRunner() + result = runner.invoke(run, ["properties", "get", "namespace", "default"]) + assert result.exit_code == 0 + assert result.output == "location s3://warehouse/database/location\n" + + +def test_properties_get_namespace_specific_property(catalog: InMemoryCatalog, namespace_properties: Properties) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE, namespace_properties) + + runner = CliRunner() + result = runner.invoke(run, ["properties", "get", "namespace", "default", "location"]) + assert result.exit_code == 0 + assert result.output == "s3://warehouse/database/location\n" + + +def test_properties_get_namespace_does_not_exist(catalog: InMemoryCatalog, namespace_properties: Properties) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE, 
namespace_properties) + + runner = CliRunner() + result = runner.invoke(run, ["properties", "get", "namespace", "doesnotexist"]) + assert result.exit_code == 1 + assert result.output == "Namespace does not exist: ('doesnotexist',)\n" + + +def test_properties_set_namespace(catalog: InMemoryCatalog, namespace_properties: Properties) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE, namespace_properties) + + runner = CliRunner() + result = runner.invoke(run, ["properties", "set", "namespace", "default", "location", "s3://new_location"]) + assert result.exit_code == 0 + assert result.output == "Updated location on default\n" + + +def test_properties_set_namespace_that_doesnt_exist(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["properties", "set", "namespace", "doesnotexist", "location", "s3://new_location"]) + assert result.exit_code == 1 + assert result.output == "Namespace does not exist: ('doesnotexist',)\n" + + +def test_properties_set_table(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["properties", "set", "table", "default.my_table", "location", "s3://new_location"]) + assert result.exit_code == 1 + assert "Writing is WIP" in result.output + + +def test_properties_set_table_does_not_exist(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["properties", "set", "table", "default.doesnotexist", "location", "s3://new_location"]) + assert result.exit_code == 1 + assert result.output == "Table does not exist: ('default', 'doesnotexist')\n" + + +def test_properties_remove_namespace(catalog: InMemoryCatalog, namespace_properties: Properties) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE, namespace_properties) + + runner = CliRunner() + result = runner.invoke(run, ["properties", "remove", "namespace", "default", "location"]) + assert result.exit_code == 0 + assert result.output == "Property location removed from default\n" + + +def test_properties_remove_namespace_that_doesnt_exist(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["properties", "remove", "namespace", "doesnotexist", "location"]) + assert result.exit_code == 1 + assert result.output == "Namespace does not exist: ('doesnotexist',)\n" + + +def test_properties_remove_table(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + + runner = CliRunner() + result = runner.invoke(run, ["properties", "remove", "table", "default.my_table", "read.split.target.size"]) + assert result.exit_code == 1 + assert result.output == "Writing is WIP\n1\n" + + +def test_properties_remove_table_property_does_not_exists(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["properties", "remove", "table", "default.my_table", "doesnotexist"]) + assert result.exit_code == 1 + assert result.output == "Property 
doesnotexist does not exist on default.my_table\n" + + +def test_properties_remove_table_does_not_exist(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["properties", "remove", "table", "default.doesnotexist", "location"]) + assert result.exit_code == 1 + assert result.output == "Table does not exist: ('default', 'doesnotexist')\n" + + +def test_json_list_root(catalog: InMemoryCatalog) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "list"]) + assert result.exit_code == 0 + assert result.output == """["default"]\n""" + + +def test_json_list_namespace(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "list", "default"]) + assert result.exit_code == 0 + assert result.output == """["default.my_table"]\n""" + + +def test_json_describe_namespace(catalog: InMemoryCatalog, namespace_properties: Properties) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE, namespace_properties) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "describe", "default"]) + assert result.exit_code == 0 + assert result.output == """{"location": "s3://warehouse/database/location"}\n""" + + +def test_json_describe_namespace_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "describe", "doesnotexist"]) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchNamespaceError", "message": "Namespace does not exist: ('doesnotexist',)"}\n""" + + +def test_json_describe_table(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "describe", "default.my_table"]) + assert result.exit_code == 0 + assert ( + result.output + == """{"identifier":["default","my_table"],"metadata_location":"s3://warehouse/default/my_table/metadata/metadata.json","metadata":{"location":"s3://bucket/test/location","table-uuid":"d20125c8-7284-442c-9aea-15fee620737c","last-updated-ms":1602638573874,"last-column-id":3,"schemas":[{"type":"struct","fields":[{"id":1,"name":"x","type":"long","required":true},{"id":2,"name":"y","type":"long","required":true,"doc":"comment"},{"id":3,"name":"z","type":"long","required":true}],"schema-id":0,"identifier-field-ids":[]}],"current-schema-id":0,"partition-specs":[{"spec-id":0,"fields":[{"source-id":1,"field-id":1000,"transform":"identity","name":"x"}]}],"default-spec-id":0,"last-partition-id":1000,"properties":{},"snapshots":[{"snapshot-id":1925,"timestamp-ms":1602638573822}],"snapshot-log":[],"metadata-log":[],"sort-orders":[{"order-id":0,"fields":[]}],"default-sort-order-id":0,"refs":{},"format-version":1,"schema":{"type":"struct","fields":[{"id":1,"name":"x","type":"long","required":true},{"id":2,"name":"y","type":"long","required":true,"doc":"comment"},{"id":3,"name":"z","type":"long","required":true}],"schema-id":0,"identifier-field-ids":[]},"partition-spec":[{"source-id":1,"field-id":1000,"transform":"identity","name":"x"}]}}\n""" + ) + + +def 
test_json_describe_table_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "describe", "default.doesnotexist"]) + assert result.exit_code == 1 + assert ( + result.output + == """{"type": "NoSuchTableError", "message": "Table or namespace does not exist: default.doesnotexist"}\n""" + ) + + +def test_json_schema(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "schema", "default.my_table"]) + assert result.exit_code == 0 + assert ( + result.output + == """{"type":"struct","fields":[{"id":1,"name":"x","type":"long","required":true},{"id":2,"name":"y","type":"long","required":true,"doc":"comment"},{"id":3,"name":"z","type":"long","required":true}],"schema-id":0,"identifier-field-ids":[]}\n""" + ) + + +def test_json_schema_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "schema", "default.doesnotexist"]) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchTableError", "message": "Table does not exist: ('default', 'doesnotexist')"}\n""" + + +def test_json_spec(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "spec", "default.my_table"]) + assert result.exit_code == 0 + assert result.output == """{"spec-id":0,"fields":[{"source-id":1,"field-id":1000,"transform":"identity","name":"x"}]}\n""" + + +def test_json_spec_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "spec", "default.doesnotexist"]) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchTableError", "message": "Table does not exist: ('default', 'doesnotexist')"}\n""" + + +def test_json_uuid(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "uuid", "default.my_table"]) + assert result.exit_code == 0 + assert result.output == """{"uuid": "d20125c8-7284-442c-9aea-15fee620737c"}\n""" + + +def test_json_uuid_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "uuid", "default.doesnotexist"]) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchTableError", "message": "Table does not exist: ('default', 'doesnotexist')"}\n""" + + +def test_json_location(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "location", "default.my_table"]) + assert result.exit_code == 0 + assert result.output == """"s3://bucket/test/location"\n""" + + +def 
test_json_location_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "location", "default.doesnotexist"]) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchTableError", "message": "Table does not exist: ('default', 'doesnotexist')"}\n""" + + +def test_json_drop_table(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "drop", "table", "default.my_table"]) + assert result.exit_code == 0 + assert result.output == """"Dropped table: default.my_table"\n""" + + +def test_json_drop_table_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "drop", "table", "default.doesnotexist"]) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchTableError", "message": "Table does not exist: ('default', 'doesnotexist')"}\n""" + + +def test_json_drop_namespace(catalog: InMemoryCatalog) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "drop", "namespace", "default"]) + assert result.exit_code == 0 + assert result.output == """"Dropped namespace: default"\n""" + + +def test_json_drop_namespace_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "drop", "namespace", "doesnotexist"]) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchNamespaceError", "message": "Namespace does not exist: ('doesnotexist',)"}\n""" + + +def test_json_rename_table(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "rename", "default.my_table", "default.my_new_table"]) + assert result.exit_code == 0 + assert result.output == """"Renamed table from default.my_table to default.my_new_table"\n""" + + +def test_json_rename_table_does_not_exists(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "rename", "default.doesnotexist", "default.bar"]) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchTableError", "message": "Table does not exist: ('default', 'doesnotexist')"}\n""" + + +def test_json_properties_get_table(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "get", "table", "default.my_table"]) + assert result.exit_code == 0 + assert result.output == """{"read.split.target.size": "134217728"}\n""" + + +def test_json_properties_get_table_specific_property(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + 
partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "get", "table", "default.my_table", "read.split.target.size"]) + assert result.exit_code == 0 + assert result.output == """"134217728"\n""" + + +def test_json_properties_get_table_specific_property_that_doesnt_exist(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "get", "table", "default.my_table", "doesnotexist"]) + assert result.exit_code == 1 + assert ( + result.output + == """{"type": "NoSuchPropertyException", "message": "Could not find property doesnotexist on table default.my_table"}\n""" + ) + + +def test_json_properties_get_table_does_not_exist(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "get", "table", "doesnotexist"]) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchTableError", "message": "Table does not exist: ('doesnotexist',)"}\n""" + + +def test_json_properties_get_namespace(catalog: InMemoryCatalog, namespace_properties: Properties) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE, namespace_properties) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "get", "namespace", "default"]) + assert result.exit_code == 0 + assert result.output == """{"location": "s3://warehouse/database/location"}\n""" + + +def test_json_properties_get_namespace_specific_property(catalog: InMemoryCatalog, namespace_properties: Properties) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE, namespace_properties) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "get", "namespace", "default", "location"]) + assert result.exit_code == 0 + assert result.output == """"s3://warehouse/database/location"\n""" + + +def test_json_properties_get_namespace_does_not_exist(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "get", "namespace", "doesnotexist"]) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchNamespaceError", "message": "Namespace does not exist: ('doesnotexist',)"}\n""" + + +def test_json_properties_set_namespace(catalog: InMemoryCatalog, namespace_properties: Properties) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE, namespace_properties) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "set", "namespace", "default", "location", "s3://new_location"]) + assert result.exit_code == 0 + assert result.output == """"Updated location on default"\n""" + + +def test_json_properties_set_namespace_that_doesnt_exist(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke( + run, ["--output=json", "properties", "set", "namespace", "doesnotexist", "location", "s3://new_location"] + ) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchNamespaceError", "message": "Namespace does not exist: ('doesnotexist',)"}\n""" + + +def test_json_properties_set_table(catalog: InMemoryCatalog) 
-> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + + runner = CliRunner() + result = runner.invoke( + run, ["--output=json", "properties", "set", "table", "default.my_table", "location", "s3://new_location"] + ) + assert result.exit_code == 1 + assert "Writing is WIP" in result.output + + +def test_json_properties_set_table_does_not_exist(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke( + run, ["--output=json", "properties", "set", "table", "default.doesnotexist", "location", "s3://new_location"] + ) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchTableError", "message": "Table does not exist: ('default', 'doesnotexist')"}\n""" + + +def test_json_properties_remove_namespace(catalog: InMemoryCatalog, namespace_properties: Properties) -> None: + catalog.create_namespace(TEST_TABLE_NAMESPACE, namespace_properties) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "remove", "namespace", "default", "location"]) + assert result.exit_code == 0 + assert result.output == """"Property location removed from default"\n""" + + +def test_json_properties_remove_namespace_that_doesnt_exist(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "remove", "namespace", "doesnotexist", "location"]) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchNamespaceError", "message": "Namespace does not exist: ('doesnotexist',)"}\n""" + + +def test_json_properties_remove_table(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "remove", "table", "default.my_table", "read.split.target.size"]) + assert result.exit_code == 1 + assert "Writing is WIP" in result.output + + +def test_json_properties_remove_table_property_does_not_exist(catalog: InMemoryCatalog) -> None: + catalog.create_table( + identifier=TEST_TABLE_IDENTIFIER, + schema=TEST_TABLE_SCHEMA, + location=TEST_TABLE_LOCATION, + partition_spec=TEST_TABLE_PARTITION_SPEC, + properties=TEST_TABLE_PROPERTIES, + ) + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "remove", "table", "default.my_table", "doesnotexist"]) + assert result.exit_code == 1 + assert ( + result.output + == """{"type": "NoSuchPropertyException", "message": "Property doesnotexist does not exist on default.my_table"}\n""" + ) + + +def test_json_properties_remove_table_does_not_exist(catalog: InMemoryCatalog) -> None: + # pylint: disable=unused-argument + + runner = CliRunner() + result = runner.invoke(run, ["--output=json", "properties", "remove", "table", "default.doesnotexist", "location"]) + assert result.exit_code == 1 + assert result.output == """{"type": "NoSuchTableError", "message": "Table does not exist: ('default', 'doesnotexist')"}\n""" diff --git a/tests/cli/test_output.py b/tests/cli/test_output.py new file mode 100644 index 0000000000..a67d5ea255 --- /dev/null +++ b/tests/cli/test_output.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation
(ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000000..ed7f1caa21 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,1661 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint:disable=redefined-outer-name +"""This module contains global pytest configurations. + +Fixtures defined in this file are automatically available to any pytest function that +declares them as an argument. + +When a fixture must be referenced from a `pytest.mark.parametrize` decorator, pass the fixture name as a string +and add the built-in `request` fixture as an extra argument to the test function. The fixture can then be +retrieved using `request.getfixturevalue(fixture_name)`.
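+
+For example, a hypothetical test (not part of this file; the test name is illustrative,
+but the fixture names are defined below) can be parametrized over fixture names and
+resolve them at runtime:
+
+    @pytest.mark.parametrize("schema_fixture", ["table_schema_simple", "table_schema_nested"])
+    def test_schema_has_fields(schema_fixture: str, request: pytest.FixtureRequest) -> None:
+        schema = request.getfixturevalue(schema_fixture)
+        assert len(schema.fields) > 0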
+""" +import os +import re +import string +import uuid +from datetime import datetime +from random import choice +from tempfile import TemporaryDirectory +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Generator, + List, + Optional, +) +from unittest.mock import MagicMock +from urllib.parse import urlparse + +import aiobotocore.awsrequest +import aiobotocore.endpoint +import aiohttp +import aiohttp.client_reqrep +import aiohttp.typedefs +import boto3 +import botocore.awsrequest +import botocore.model +import pytest +from moto import mock_dynamodb, mock_glue, mock_s3 + +from pyiceberg import schema +from pyiceberg.catalog import Catalog +from pyiceberg.catalog.noop import NoopCatalog +from pyiceberg.io import ( + GCS_ENDPOINT, + GCS_PROJECT_ID, + GCS_TOKEN, + GCS_TOKEN_EXPIRES_AT_MS, + OutputFile, + OutputStream, + fsspec, + load_file_io, +) +from pyiceberg.io.fsspec import FsspecFileIO +from pyiceberg.manifest import DataFile, FileFormat +from pyiceberg.schema import Schema +from pyiceberg.serializers import ToOutputFile +from pyiceberg.table import FileScanTask, Table +from pyiceberg.table.metadata import TableMetadataV2 +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DoubleType, + FloatType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + StringType, + StructType, +) +from pyiceberg.utils.datetime import datetime_to_millis + +if TYPE_CHECKING: + from pyiceberg.io.pyarrow import PyArrowFile, PyArrowFileIO + + +def pytest_collection_modifyitems(items: List[pytest.Item]) -> None: + for item in items: + if not any(item.iter_markers()): + item.add_marker("unmarked") + + +def pytest_addoption(parser: pytest.Parser) -> None: + # S3 options + parser.addoption( + "--s3.endpoint", action="store", default="http://localhost:9000", help="The S3 endpoint URL for tests marked as s3" + ) + parser.addoption("--s3.access-key-id", action="store", default="admin", help="The AWS access key ID for tests marked as s3") + parser.addoption( + "--s3.secret-access-key", action="store", default="password", help="The AWS secret access key ID for tests marked as s3" + ) + # ADLFS options + # Azurite provides default account name and key. Those can be customized using env variables. 
+ # For more information, see README file at https://github.com/azure/azurite#default-storage-account + parser.addoption( + "--adlfs.endpoint", + action="store", + default="http://127.0.0.1:10000", + help="The ADLS endpoint URL for tests marked as adlfs", + ) + parser.addoption( + "--adlfs.account-name", action="store", default="devstoreaccount1", help="The ADLS account name for tests marked as adlfs" + ) + parser.addoption( + "--adlfs.account-key", + action="store", + default="Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==", + help="The ADLS account key for tests marked as adlfs", + ) + parser.addoption( + "--gcs.endpoint", action="store", default="http://0.0.0.0:4443", help="The GCS endpoint URL for tests marked gcs" + ) + parser.addoption( + "--gcs.oauth2.token", action="store", default="anon", help="The GCS authentication method for tests marked gcs" + ) + parser.addoption("--gcs.project-id", action="store", default="test", help="The GCP project for tests marked gcs") + + +@pytest.fixture(scope="session") +def table_schema_simple() -> Schema: + return schema.Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + schema_id=1, + identifier_field_ids=[2], + ) + + +@pytest.fixture(scope="session") +def table_schema_nested() -> Schema: + return schema.Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + NestedField( + field_id=4, + name="qux", + field_type=ListType(element_id=5, element_type=StringType(), element_required=True), + required=True, + ), + NestedField( + field_id=6, + name="quux", + field_type=MapType( + key_id=7, + key_type=StringType(), + value_id=8, + value_type=MapType(key_id=9, key_type=StringType(), value_id=10, value_type=IntegerType(), value_required=True), + value_required=True, + ), + required=True, + ), + NestedField( + field_id=11, + name="location", + field_type=ListType( + element_id=12, + element_type=StructType( + NestedField(field_id=13, name="latitude", field_type=FloatType(), required=False), + NestedField(field_id=14, name="longitude", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + NestedField( + field_id=15, + name="person", + field_type=StructType( + NestedField(field_id=16, name="name", field_type=StringType(), required=False), + NestedField(field_id=17, name="age", field_type=IntegerType(), required=True), + ), + required=False, + ), + schema_id=1, + identifier_field_ids=[2], + ) + + +@pytest.fixture(scope="session") +def table_schema_nested_with_struct_key_map() -> Schema: + return schema.Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + NestedField( + field_id=4, + name="qux", + field_type=ListType(element_id=5, element_type=StringType(), element_required=True), + required=True, + ), + NestedField( + field_id=6, + name="quux", + field_type=MapType( + key_id=7, + key_type=StringType(), + value_id=8, + value_type=MapType(key_id=9, key_type=StringType(), value_id=10,
value_type=IntegerType(), value_required=True), + value_required=True, + ), + required=True, + ), + NestedField( + field_id=11, + name="location", + field_type=MapType( + key_id=18, + value_id=19, + key_type=StructType( + NestedField(field_id=21, name="address", field_type=StringType(), required=True), + NestedField(field_id=22, name="city", field_type=StringType(), required=True), + NestedField(field_id=23, name="zip", field_type=IntegerType(), required=True), + ), + value_type=StructType( + NestedField(field_id=13, name="latitude", field_type=FloatType(), required=True), + NestedField(field_id=14, name="longitude", field_type=FloatType(), required=True), + ), + value_required=True, + ), + required=True, + ), + NestedField( + field_id=15, + name="person", + field_type=StructType( + NestedField(field_id=16, name="name", field_type=StringType(), required=False), + NestedField(field_id=17, name="age", field_type=IntegerType(), required=True), + ), + required=False, + ), + NestedField( + field_id=24, + name="points", + field_type=ListType( + element_id=25, + element_type=StructType( + NestedField(field_id=26, name="x", field_type=LongType(), required=True), + NestedField(field_id=27, name="y", field_type=LongType(), required=True), + ), + element_required=False, + ), + required=False, + ), + NestedField(field_id=28, name="float", field_type=FloatType(), required=True), + NestedField(field_id=29, name="double", field_type=DoubleType(), required=True), + schema_id=1, + identifier_field_ids=[1], + ) + + +@pytest.fixture(scope="session") +def all_avro_types() -> Dict[str, Any]: + return { + "type": "record", + "name": "all_avro_types", + "fields": [ + {"name": "primitive_string", "type": "string", "field-id": 100}, + {"name": "primitive_int", "type": "int", "field-id": 200}, + {"name": "primitive_long", "type": "long", "field-id": 300}, + {"name": "primitive_float", "type": "float", "field-id": 400}, + {"name": "primitive_double", "type": "double", "field-id": 500}, + {"name": "primitive_bytes", "type": "bytes", "field-id": 600}, + { + "type": "record", + "name": "Person", + "fields": [ + {"name": "name", "type": "string", "field-id": 701}, + {"name": "age", "type": "long", "field-id": 702}, + {"name": "gender", "type": ["string", "null"], "field-id": 703}, + ], + "field-id": 700, + }, + { + "name": "array_with_string", + "type": { + "type": "array", + "items": "string", + "default": [], + "element-id": 801, + }, + "field-id": 800, + }, + { + "name": "array_with_optional_string", + "type": [ + "null", + { + "type": "array", + "items": ["string", "null"], + "default": [], + "element-id": 901, + }, + ], + "field-id": 900, + }, + { + "name": "array_with_optional_record", + "type": [ + "null", + { + "type": "array", + "items": [ + "null", + { + "type": "record", + "name": "person", + "fields": [ + {"name": "name", "type": "string", "field-id": 1002}, + {"name": "age", "type": "long", "field-id": 1003}, + {"name": "gender", "type": ["string", "null"], "field-id": 1004}, + ], + }, + ], + "element-id": 1001, + }, + ], + "field-id": 1000, + }, + { + "name": "map_with_longs", + "type": { + "type": "map", + "values": "long", + "default": {}, + "key-id": 1101, + "value-id": 1102, + }, + "field-id": 1000, + }, + ], + } + + +EXAMPLE_TABLE_METADATA_V2 = { + "format-version": 2, + "table-uuid": "9c12d441-03fe-4693-9a96-a0705ddf69c1", + "location": "s3://bucket/test/location", + "last-sequence-number": 34, + "last-updated-ms": 1602638573590, + "last-column-id": 3, + "current-schema-id": 1, + "schemas": [ + 
{"type": "struct", "schema-id": 0, "fields": [{"id": 1, "name": "x", "required": True, "type": "long"}]}, + { + "type": "struct", + "schema-id": 1, + "identifier-field-ids": [1, 2], + "fields": [ + {"id": 1, "name": "x", "required": True, "type": "long"}, + {"id": 2, "name": "y", "required": True, "type": "long", "doc": "comment"}, + {"id": 3, "name": "z", "required": True, "type": "long"}, + ], + }, + ], + "default-spec-id": 0, + "partition-specs": [{"spec-id": 0, "fields": [{"name": "x", "transform": "identity", "source-id": 1, "field-id": 1000}]}], + "last-partition-id": 1000, + "default-sort-order-id": 3, + "sort-orders": [ + { + "order-id": 3, + "fields": [ + {"transform": "identity", "source-id": 2, "direction": "asc", "null-order": "nulls-first"}, + {"transform": "bucket[4]", "source-id": 3, "direction": "desc", "null-order": "nulls-last"}, + ], + } + ], + "properties": {"read.split.target.size": "134217728"}, + "current-snapshot-id": 3055729675574597004, + "snapshots": [ + { + "snapshot-id": 3051729675574597004, + "timestamp-ms": 1515100955770, + "sequence-number": 0, + "summary": {"operation": "append"}, + "manifest-list": "s3://a/b/1.avro", + }, + { + "snapshot-id": 3055729675574597004, + "parent-snapshot-id": 3051729675574597004, + "timestamp-ms": 1555100955770, + "sequence-number": 1, + "summary": {"operation": "append"}, + "manifest-list": "s3://a/b/2.avro", + "schema-id": 1, + }, + ], + "snapshot-log": [ + {"snapshot-id": 3051729675574597004, "timestamp-ms": 1515100955770}, + {"snapshot-id": 3055729675574597004, "timestamp-ms": 1555100955770}, + ], + "metadata-log": [{"metadata-file": "s3://bucket/.../v1.json", "timestamp-ms": 1515100}], + "refs": {"test": {"snapshot-id": 3051729675574597004, "type": "tag", "max-ref-age-ms": 10000000}}, +} + + +@pytest.fixture +def example_table_metadata_v2() -> Dict[str, Any]: + return EXAMPLE_TABLE_METADATA_V2 + + +@pytest.fixture(scope="session") +def metadata_location(tmp_path_factory: pytest.TempPathFactory) -> str: + from pyiceberg.io.pyarrow import PyArrowFileIO + + metadata_location = str(tmp_path_factory.mktemp("metadata") / f"{uuid.uuid4()}.metadata.json") + metadata = TableMetadataV2(**EXAMPLE_TABLE_METADATA_V2) + ToOutputFile.table_metadata(metadata, PyArrowFileIO().new_output(location=metadata_location), overwrite=True) + return metadata_location + + +@pytest.fixture(scope="session") +def metadata_location_gz(tmp_path_factory: pytest.TempPathFactory) -> str: + from pyiceberg.io.pyarrow import PyArrowFileIO + + metadata_location = str(tmp_path_factory.mktemp("metadata") / f"{uuid.uuid4()}.gz.metadata.json") + metadata = TableMetadataV2(**EXAMPLE_TABLE_METADATA_V2) + ToOutputFile.table_metadata(metadata, PyArrowFileIO().new_output(location=metadata_location), overwrite=True) + return metadata_location + + +manifest_entry_records = [ + { + "status": 1, + "snapshot_id": 8744736658442914487, + "data_file": { + "file_path": "/home/iceberg/warehouse/nyc/taxis_partitioned/data/VendorID=null/00000-633-d8a4223e-dc97-45a1-86e1-adaba6e8abd7-00001.parquet", + "file_format": "PARQUET", + "partition": {"VendorID": 1, "tpep_pickup_datetime": 1925}, + "record_count": 19513, + "file_size_in_bytes": 388872, + "block_size_in_bytes": 67108864, + "column_sizes": [ + {"key": 1, "value": 53}, + {"key": 2, "value": 98153}, + {"key": 3, "value": 98693}, + {"key": 4, "value": 53}, + {"key": 5, "value": 53}, + {"key": 6, "value": 53}, + {"key": 7, "value": 17425}, + {"key": 8, "value": 18528}, + {"key": 9, "value": 53}, + {"key": 10, "value": 44788}, + 
{"key": 11, "value": 35571}, + {"key": 12, "value": 53}, + {"key": 13, "value": 1243}, + {"key": 14, "value": 2355}, + {"key": 15, "value": 12750}, + {"key": 16, "value": 4029}, + {"key": 17, "value": 110}, + {"key": 18, "value": 47194}, + {"key": 19, "value": 2948}, + ], + "value_counts": [ + {"key": 1, "value": 19513}, + {"key": 2, "value": 19513}, + {"key": 3, "value": 19513}, + {"key": 4, "value": 19513}, + {"key": 5, "value": 19513}, + {"key": 6, "value": 19513}, + {"key": 7, "value": 19513}, + {"key": 8, "value": 19513}, + {"key": 9, "value": 19513}, + {"key": 10, "value": 19513}, + {"key": 11, "value": 19513}, + {"key": 12, "value": 19513}, + {"key": 13, "value": 19513}, + {"key": 14, "value": 19513}, + {"key": 15, "value": 19513}, + {"key": 16, "value": 19513}, + {"key": 17, "value": 19513}, + {"key": 18, "value": 19513}, + {"key": 19, "value": 19513}, + ], + "null_value_counts": [ + {"key": 1, "value": 19513}, + {"key": 2, "value": 0}, + {"key": 3, "value": 0}, + {"key": 4, "value": 19513}, + {"key": 5, "value": 19513}, + {"key": 6, "value": 19513}, + {"key": 7, "value": 0}, + {"key": 8, "value": 0}, + {"key": 9, "value": 19513}, + {"key": 10, "value": 0}, + {"key": 11, "value": 0}, + {"key": 12, "value": 19513}, + {"key": 13, "value": 0}, + {"key": 14, "value": 0}, + {"key": 15, "value": 0}, + {"key": 16, "value": 0}, + {"key": 17, "value": 0}, + {"key": 18, "value": 0}, + {"key": 19, "value": 0}, + ], + "nan_value_counts": [ + {"key": 16, "value": 0}, + {"key": 17, "value": 0}, + {"key": 18, "value": 0}, + {"key": 19, "value": 0}, + {"key": 10, "value": 0}, + {"key": 11, "value": 0}, + {"key": 12, "value": 0}, + {"key": 13, "value": 0}, + {"key": 14, "value": 0}, + {"key": 15, "value": 0}, + ], + "lower_bounds": [ + {"key": 2, "value": b"2020-04-01 00:00"}, + {"key": 3, "value": b"2020-04-01 00:12"}, + {"key": 7, "value": b"\x03\x00\x00\x00"}, + {"key": 8, "value": b"\x01\x00\x00\x00"}, + {"key": 10, "value": b"\xf6(\\\x8f\xc2\x05S\xc0"}, + {"key": 11, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + {"key": 13, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + {"key": 14, "value": b"\x00\x00\x00\x00\x00\x00\xe0\xbf"}, + {"key": 15, "value": b")\\\x8f\xc2\xf5(\x08\xc0"}, + {"key": 16, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + {"key": 17, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + {"key": 18, "value": b"\xf6(\\\x8f\xc2\xc5S\xc0"}, + {"key": 19, "value": b"\x00\x00\x00\x00\x00\x00\x04\xc0"}, + ], + "upper_bounds": [ + {"key": 2, "value": b"2020-04-30 23:5:"}, + {"key": 3, "value": b"2020-05-01 00:41"}, + {"key": 7, "value": b"\t\x01\x00\x00"}, + {"key": 8, "value": b"\t\x01\x00\x00"}, + {"key": 10, "value": b"\xcd\xcc\xcc\xcc\xcc,_@"}, + {"key": 11, "value": b"\x1f\x85\xebQ\\\xe2\xfe@"}, + {"key": 13, "value": b"\x00\x00\x00\x00\x00\x00\x12@"}, + {"key": 14, "value": b"\x00\x00\x00\x00\x00\x00\xe0?"}, + {"key": 15, "value": b"q=\n\xd7\xa3\xf01@"}, + {"key": 16, "value": b"\x00\x00\x00\x00\x00`B@"}, + {"key": 17, "value": b"333333\xd3?"}, + {"key": 18, "value": b"\x00\x00\x00\x00\x00\x18b@"}, + {"key": 19, "value": b"\x00\x00\x00\x00\x00\x00\x04@"}, + ], + "key_metadata": None, + "split_offsets": [4], + "sort_order_id": 0, + }, + }, + { + "status": 1, + "snapshot_id": 8744736658442914487, + "data_file": { + "file_path": "/home/iceberg/warehouse/nyc/taxis_partitioned/data/VendorID=1/00000-633-d8a4223e-dc97-45a1-86e1-adaba6e8abd7-00002.parquet", + "file_format": "PARQUET", + "partition": {"VendorID": 1, "tpep_pickup_datetime": 1925}, + "record_count": 95050, + 
"file_size_in_bytes": 1265950, + "block_size_in_bytes": 67108864, + "column_sizes": [ + {"key": 1, "value": 318}, + {"key": 2, "value": 329806}, + {"key": 3, "value": 331632}, + {"key": 4, "value": 15343}, + {"key": 5, "value": 2351}, + {"key": 6, "value": 3389}, + {"key": 7, "value": 71269}, + {"key": 8, "value": 76429}, + {"key": 9, "value": 16383}, + {"key": 10, "value": 86992}, + {"key": 11, "value": 89608}, + {"key": 12, "value": 265}, + {"key": 13, "value": 19377}, + {"key": 14, "value": 1692}, + {"key": 15, "value": 76162}, + {"key": 16, "value": 4354}, + {"key": 17, "value": 759}, + {"key": 18, "value": 120650}, + {"key": 19, "value": 11804}, + ], + "value_counts": [ + {"key": 1, "value": 95050}, + {"key": 2, "value": 95050}, + {"key": 3, "value": 95050}, + {"key": 4, "value": 95050}, + {"key": 5, "value": 95050}, + {"key": 6, "value": 95050}, + {"key": 7, "value": 95050}, + {"key": 8, "value": 95050}, + {"key": 9, "value": 95050}, + {"key": 10, "value": 95050}, + {"key": 11, "value": 95050}, + {"key": 12, "value": 95050}, + {"key": 13, "value": 95050}, + {"key": 14, "value": 95050}, + {"key": 15, "value": 95050}, + {"key": 16, "value": 95050}, + {"key": 17, "value": 95050}, + {"key": 18, "value": 95050}, + {"key": 19, "value": 95050}, + ], + "null_value_counts": [ + {"key": 1, "value": 0}, + {"key": 2, "value": 0}, + {"key": 3, "value": 0}, + {"key": 4, "value": 0}, + {"key": 5, "value": 0}, + {"key": 6, "value": 0}, + {"key": 7, "value": 0}, + {"key": 8, "value": 0}, + {"key": 9, "value": 0}, + {"key": 10, "value": 0}, + {"key": 11, "value": 0}, + {"key": 12, "value": 95050}, + {"key": 13, "value": 0}, + {"key": 14, "value": 0}, + {"key": 15, "value": 0}, + {"key": 16, "value": 0}, + {"key": 17, "value": 0}, + {"key": 18, "value": 0}, + {"key": 19, "value": 0}, + ], + "nan_value_counts": [ + {"key": 16, "value": 0}, + {"key": 17, "value": 0}, + {"key": 18, "value": 0}, + {"key": 19, "value": 0}, + {"key": 10, "value": 0}, + {"key": 11, "value": 0}, + {"key": 12, "value": 0}, + {"key": 13, "value": 0}, + {"key": 14, "value": 0}, + {"key": 15, "value": 0}, + ], + "lower_bounds": [ + {"key": 1, "value": b"\x01\x00\x00\x00"}, + {"key": 2, "value": b"2020-04-01 00:00"}, + {"key": 3, "value": b"2020-04-01 00:03"}, + {"key": 4, "value": b"\x00\x00\x00\x00"}, + {"key": 5, "value": b"\x01\x00\x00\x00"}, + {"key": 6, "value": b"N"}, + {"key": 7, "value": b"\x01\x00\x00\x00"}, + {"key": 8, "value": b"\x01\x00\x00\x00"}, + {"key": 9, "value": b"\x01\x00\x00\x00"}, + {"key": 10, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + {"key": 11, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + {"key": 13, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + {"key": 14, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + {"key": 15, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + {"key": 16, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + {"key": 17, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + {"key": 18, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + {"key": 19, "value": b"\x00\x00\x00\x00\x00\x00\x00\x00"}, + ], + "upper_bounds": [ + {"key": 1, "value": b"\x01\x00\x00\x00"}, + {"key": 2, "value": b"2020-04-30 23:5:"}, + {"key": 3, "value": b"2020-05-01 00:1:"}, + {"key": 4, "value": b"\x06\x00\x00\x00"}, + {"key": 5, "value": b"c\x00\x00\x00"}, + {"key": 6, "value": b"Y"}, + {"key": 7, "value": b"\t\x01\x00\x00"}, + {"key": 8, "value": b"\t\x01\x00\x00"}, + {"key": 9, "value": b"\x04\x00\x00\x00"}, + {"key": 10, "value": b"\\\x8f\xc2\xf5(8\x8c@"}, + {"key": 11, "value": 
b"\xcd\xcc\xcc\xcc\xcc,f@"}, + {"key": 13, "value": b"\x00\x00\x00\x00\x00\x00\x1c@"}, + {"key": 14, "value": b"\x9a\x99\x99\x99\x99\x99\xf1?"}, + {"key": 15, "value": b"\x00\x00\x00\x00\x00\x00Y@"}, + {"key": 16, "value": b"\x00\x00\x00\x00\x00\xb0X@"}, + {"key": 17, "value": b"333333\xd3?"}, + {"key": 18, "value": b"\xc3\xf5(\\\x8f:\x8c@"}, + {"key": 19, "value": b"\x00\x00\x00\x00\x00\x00\x04@"}, + ], + "key_metadata": None, + "split_offsets": [4], + "sort_order_id": 0, + }, + }, +] + +manifest_file_records_v1 = [ + { + "manifest_path": "/home/iceberg/warehouse/nyc/taxis_partitioned/metadata/0125c686-8aa6-4502-bdcc-b6d17ca41a3b-m0.avro", + "manifest_length": 7989, + "partition_spec_id": 0, + "added_snapshot_id": 9182715666859759686, + "added_data_files_count": 3, + "existing_data_files_count": 0, + "deleted_data_files_count": 0, + "partitions": [ + {"contains_null": True, "contains_nan": False, "lower_bound": b"\x01\x00\x00\x00", "upper_bound": b"\x02\x00\x00\x00"} + ], + "added_rows_count": 237993, + "existing_rows_count": 0, + "deleted_rows_count": 0, + } +] + +manifest_file_records_v2 = [ + { + "manifest_path": "/home/iceberg/warehouse/nyc/taxis_partitioned/metadata/0125c686-8aa6-4502-bdcc-b6d17ca41a3b-m0.avro", + "manifest_length": 7989, + "partition_spec_id": 0, + "content": 1, + "sequence_number": 3, + "min_sequence_number": 3, + "added_snapshot_id": 9182715666859759686, + "added_files_count": 3, + "existing_files_count": 0, + "deleted_files_count": 0, + "added_rows_count": 237993, + "existing_rows_count": 0, + "deleted_rows_count": 0, + "partitions": [ + {"contains_null": True, "contains_nan": False, "lower_bound": b"\x01\x00\x00\x00", "upper_bound": b"\x02\x00\x00\x00"} + ], + "key_metadata": b"\x19\x25", + } +] + + +@pytest.fixture(scope="session") +def avro_schema_manifest_file_v1() -> Dict[str, Any]: + return { + "type": "record", + "name": "manifest_file", + "fields": [ + {"name": "manifest_path", "type": "string", "doc": "Location URI with FS scheme", "field-id": 500}, + {"name": "manifest_length", "type": "long", "doc": "Total file size in bytes", "field-id": 501}, + {"name": "partition_spec_id", "type": "int", "doc": "Spec ID used to write", "field-id": 502}, + { + "name": "added_snapshot_id", + "type": ["null", "long"], + "doc": "Snapshot ID that added the manifest", + "default": None, + "field-id": 503, + }, + { + "name": "added_data_files_count", + "type": ["null", "int"], + "doc": "Added entry count", + "default": None, + "field-id": 504, + }, + { + "name": "existing_data_files_count", + "type": ["null", "int"], + "doc": "Existing entry count", + "default": None, + "field-id": 505, + }, + { + "name": "deleted_data_files_count", + "type": ["null", "int"], + "doc": "Deleted entry count", + "default": None, + "field-id": 506, + }, + { + "name": "partitions", + "type": [ + "null", + { + "type": "array", + "items": { + "type": "record", + "name": "r508", + "fields": [ + { + "name": "contains_null", + "type": "boolean", + "doc": "True if any file has a null partition value", + "field-id": 509, + }, + { + "name": "contains_nan", + "type": ["null", "boolean"], + "doc": "True if any file has a nan partition value", + "default": None, + "field-id": 518, + }, + { + "name": "lower_bound", + "type": ["null", "bytes"], + "doc": "Partition lower bound for all files", + "default": None, + "field-id": 510, + }, + { + "name": "upper_bound", + "type": ["null", "bytes"], + "doc": "Partition upper bound for all files", + "default": None, + "field-id": 511, + }, + ], + }, + "element-id": 
508, + }, + ], + "doc": "Summary for each partition", + "default": None, + "field-id": 507, + }, + {"name": "added_rows_count", "type": ["null", "long"], "doc": "Added rows count", "default": None, "field-id": 512}, + { + "name": "existing_rows_count", + "type": ["null", "long"], + "doc": "Existing rows count", + "default": None, + "field-id": 513, + }, + { + "name": "deleted_rows_count", + "type": ["null", "long"], + "doc": "Deleted rows count", + "default": None, + "field-id": 514, + }, + ], + } + + +@pytest.fixture(scope="session") +def avro_schema_manifest_file_v2() -> Dict[str, Any]: + return { + "type": "record", + "name": "manifest_file", + "fields": [ + {"name": "manifest_path", "type": "string", "doc": "Location URI with FS scheme", "field-id": 500}, + {"name": "manifest_length", "type": "long", "doc": "Total file size in bytes", "field-id": 501}, + {"name": "partition_spec_id", "type": "int", "doc": "Spec ID used to write", "field-id": 502}, + {"name": "content", "type": "int", "doc": "Contents of the manifest: 0=data, 1=deletes", "field-id": 517}, + { + "name": "sequence_number", + "type": ["null", "long"], + "doc": "Sequence number when the manifest was added", + "field-id": 515, + }, + { + "name": "min_sequence_number", + "type": ["null", "long"], + "doc": "Lowest sequence number in the manifest", + "field-id": 516, + }, + {"name": "added_snapshot_id", "type": "long", "doc": "Snapshot ID that added the manifest", "field-id": 503}, + {"name": "added_files_count", "type": "int", "doc": "Added entry count", "field-id": 504}, + {"name": "existing_files_count", "type": "int", "doc": "Existing entry count", "field-id": 505}, + {"name": "deleted_files_count", "type": "int", "doc": "Deleted entry count", "field-id": 506}, + {"name": "added_rows_count", "type": "long", "doc": "Added rows count", "field-id": 512}, + {"name": "existing_rows_count", "type": "long", "doc": "Existing rows count", "field-id": 513}, + {"name": "deleted_rows_count", "type": "long", "doc": "Deleted rows count", "field-id": 514}, + { + "name": "partitions", + "type": [ + "null", + { + "type": "array", + "items": { + "type": "record", + "name": "r508", + "fields": [ + { + "name": "contains_null", + "type": "boolean", + "doc": "True if any file has a null partition value", + "field-id": 509, + }, + { + "name": "contains_nan", + "type": ["null", "boolean"], + "doc": "True if any file has a nan partition value", + "default": None, + "field-id": 518, + }, + { + "name": "lower_bound", + "type": ["null", "bytes"], + "doc": "Partition lower bound for all files", + "default": None, + "field-id": 510, + }, + { + "name": "upper_bound", + "type": ["null", "bytes"], + "doc": "Partition upper bound for all files", + "default": None, + "field-id": 511, + }, + ], + }, + "element-id": 508, + }, + ], + "doc": "Summary for each partition", + "default": None, + "field-id": 507, + }, + ], + } + + +@pytest.fixture(scope="session") +def avro_schema_manifest_entry() -> Dict[str, Any]: + return { + "type": "record", + "name": "manifest_entry", + "fields": [ + {"name": "status", "type": "int", "field-id": 0}, + {"name": "snapshot_id", "type": ["null", "long"], "default": None, "field-id": 1}, + { + "name": "data_file", + "type": { + "type": "record", + "name": "r2", + "fields": [ + {"name": "file_path", "type": "string", "doc": "Location URI with FS scheme", "field-id": 100}, + { + "name": "file_format", + "type": "string", + "doc": "File format name: avro, orc, or parquet", + "field-id": 101, + }, + { + "name": "partition", + "type": { 
+ "type": "record", + "name": "r102", + "fields": [ + {"field-id": 1000, "default": None, "name": "VendorID", "type": ["null", "int"]}, + { + "field-id": 1001, + "default": None, + "name": "tpep_pickup_datetime", + "type": ["null", {"type": "int", "logicalType": "date"}], + }, + ], + }, + "field-id": 102, + }, + {"name": "record_count", "type": "long", "doc": "Number of records in the file", "field-id": 103}, + {"name": "file_size_in_bytes", "type": "long", "doc": "Total file size in bytes", "field-id": 104}, + {"name": "block_size_in_bytes", "type": "long", "field-id": 105}, + { + "name": "column_sizes", + "type": [ + "null", + { + "type": "array", + "items": { + "type": "record", + "name": "k117_v118", + "fields": [ + {"name": "key", "type": "int", "field-id": 117}, + {"name": "value", "type": "long", "field-id": 118}, + ], + }, + "logicalType": "map", + }, + ], + "doc": "Map of column id to total size on disk", + "default": None, + "field-id": 108, + }, + { + "name": "value_counts", + "type": [ + "null", + { + "type": "array", + "items": { + "type": "record", + "name": "k119_v120", + "fields": [ + {"name": "key", "type": "int", "field-id": 119}, + {"name": "value", "type": "long", "field-id": 120}, + ], + }, + "logicalType": "map", + }, + ], + "doc": "Map of column id to total count, including null and NaN", + "default": None, + "field-id": 109, + }, + { + "name": "null_value_counts", + "type": [ + "null", + { + "type": "array", + "items": { + "type": "record", + "name": "k121_v122", + "fields": [ + {"name": "key", "type": "int", "field-id": 121}, + {"name": "value", "type": "long", "field-id": 122}, + ], + }, + "logicalType": "map", + }, + ], + "doc": "Map of column id to null value count", + "default": None, + "field-id": 110, + }, + { + "name": "nan_value_counts", + "type": [ + "null", + { + "type": "array", + "items": { + "type": "record", + "name": "k138_v139", + "fields": [ + {"name": "key", "type": "int", "field-id": 138}, + {"name": "value", "type": "long", "field-id": 139}, + ], + }, + "logicalType": "map", + }, + ], + "doc": "Map of column id to number of NaN values in the column", + "default": None, + "field-id": 137, + }, + { + "name": "lower_bounds", + "type": [ + "null", + { + "type": "array", + "items": { + "type": "record", + "name": "k126_v127", + "fields": [ + {"name": "key", "type": "int", "field-id": 126}, + {"name": "value", "type": "bytes", "field-id": 127}, + ], + }, + "logicalType": "map", + }, + ], + "doc": "Map of column id to lower bound", + "default": None, + "field-id": 125, + }, + { + "name": "upper_bounds", + "type": [ + "null", + { + "type": "array", + "items": { + "type": "record", + "name": "k129_v130", + "fields": [ + {"name": "key", "type": "int", "field-id": 129}, + {"name": "value", "type": "bytes", "field-id": 130}, + ], + }, + "logicalType": "map", + }, + ], + "doc": "Map of column id to upper bound", + "default": None, + "field-id": 128, + }, + { + "name": "key_metadata", + "type": ["null", "bytes"], + "doc": "Encryption key metadata blob", + "default": None, + "field-id": 131, + }, + { + "name": "split_offsets", + "type": ["null", {"type": "array", "items": "long", "element-id": 133}], + "doc": "Splittable offsets", + "default": None, + "field-id": 132, + }, + { + "name": "sort_order_id", + "type": ["null", "int"], + "doc": "Sort order ID", + "default": None, + "field-id": 140, + }, + ], + }, + "field-id": 2, + }, + ], + } + + +@pytest.fixture(scope="session") +def simple_struct() -> StructType: + return StructType( + NestedField(id=1, 
name="required_field", field_type=StringType(), required=True, doc="this is a doc"), + NestedField(id=2, name="optional_field", field_type=IntegerType()), + ) + + +@pytest.fixture(scope="session") +def simple_list() -> ListType: + return ListType(element_id=22, element=StringType(), element_required=True) + + +@pytest.fixture(scope="session") +def simple_map() -> MapType: + return MapType(key_id=19, key_type=StringType(), value_id=25, value_type=DoubleType(), value_required=False) + + +class LocalOutputFile(OutputFile): + """An OutputFile implementation for local files (for test use only).""" + + def __init__(self, location: str) -> None: + parsed_location = urlparse(location) # Create a ParseResult from the uri + if ( + parsed_location.scheme and parsed_location.scheme != "file" + ): # Validate that an uri is provided with a scheme of `file` + raise ValueError("LocalOutputFile location must have a scheme of `file`") + elif parsed_location.netloc: + raise ValueError(f"Network location is not allowed for LocalOutputFile: {parsed_location.netloc}") + + super().__init__(location=location) + self._path = parsed_location.path + + def __len__(self) -> int: + """Return the length of an instance of the LocalOutputFile class.""" + return os.path.getsize(self._path) + + def exists(self) -> bool: + return os.path.exists(self._path) + + def to_input_file(self) -> "PyArrowFile": + from pyiceberg.io.pyarrow import PyArrowFileIO + + return PyArrowFileIO().new_input(location=self.location) + + def create(self, overwrite: bool = False) -> OutputStream: + output_file = open(self._path, "wb" if overwrite else "xb") + if not issubclass(type(output_file), OutputStream): + raise TypeError("Object returned from LocalOutputFile.create(...) does not match the OutputStream protocol.") + return output_file + + +@pytest.fixture(scope="session") +def generated_manifest_entry_file(avro_schema_manifest_entry: Dict[str, Any]) -> Generator[str, None, None]: + from fastavro import parse_schema, writer + + parsed_schema = parse_schema(avro_schema_manifest_entry) + + with TemporaryDirectory() as tmpdir: + tmp_avro_file = tmpdir + "/manifest.avro" + with open(tmp_avro_file, "wb") as out: + writer(out, parsed_schema, manifest_entry_records) + yield tmp_avro_file + + +@pytest.fixture(scope="session") +def generated_manifest_file_file_v1( + avro_schema_manifest_file_v1: Dict[str, Any], generated_manifest_entry_file: str +) -> Generator[str, None, None]: + from fastavro import parse_schema, writer + + parsed_schema = parse_schema(avro_schema_manifest_file_v1) + + # Make sure that a valid manifest_path is set + manifest_file_records_v1[0]["manifest_path"] = generated_manifest_entry_file + + with TemporaryDirectory() as tmpdir: + tmp_avro_file = tmpdir + "/manifest.avro" + with open(tmp_avro_file, "wb") as out: + writer(out, parsed_schema, manifest_file_records_v1) + yield tmp_avro_file + + +@pytest.fixture(scope="session") +def generated_manifest_file_file_v2( + avro_schema_manifest_file_v2: Dict[str, Any], generated_manifest_entry_file: str +) -> Generator[str, None, None]: + from fastavro import parse_schema, writer + + parsed_schema = parse_schema(avro_schema_manifest_file_v2) + + # Make sure that a valid manifest_path is set + manifest_file_records_v2[0]["manifest_path"] = generated_manifest_entry_file + + with TemporaryDirectory() as tmpdir: + tmp_avro_file = tmpdir + "/manifest.avro" + with open(tmp_avro_file, "wb") as out: + writer(out, parsed_schema, manifest_file_records_v2) + yield tmp_avro_file + + 
+@pytest.fixture(scope="session") +def iceberg_manifest_entry_schema() -> Schema: + return Schema( + NestedField(field_id=0, name="status", field_type=IntegerType(), required=True), + NestedField(field_id=1, name="snapshot_id", field_type=LongType(), required=False), + NestedField( + field_id=2, + name="data_file", + field_type=StructType( + NestedField( + field_id=100, + name="file_path", + field_type=StringType(), + doc="Location URI with FS scheme", + required=True, + ), + NestedField( + field_id=101, + name="file_format", + field_type=StringType(), + doc="File format name: avro, orc, or parquet", + required=True, + ), + NestedField( + field_id=102, + name="partition", + field_type=StructType( + NestedField( + field_id=1000, + name="VendorID", + field_type=IntegerType(), + required=False, + ), + NestedField( + field_id=1001, + name="tpep_pickup_datetime", + field_type=DateType(), + required=False, + ), + ), + required=True, + ), + NestedField( + field_id=103, + name="record_count", + field_type=LongType(), + doc="Number of records in the file", + required=True, + ), + NestedField( + field_id=104, + name="file_size_in_bytes", + field_type=LongType(), + doc="Total file size in bytes", + required=True, + ), + NestedField( + field_id=105, + name="block_size_in_bytes", + field_type=LongType(), + required=True, + ), + NestedField( + field_id=108, + name="column_sizes", + field_type=MapType( + key_id=117, + key_type=IntegerType(), + value_id=118, + value_type=LongType(), + value_required=True, + ), + doc="Map of column id to total size on disk", + required=False, + ), + NestedField( + field_id=109, + name="value_counts", + field_type=MapType( + key_id=119, + key_type=IntegerType(), + value_id=120, + value_type=LongType(), + value_required=True, + ), + doc="Map of column id to total count, including null and NaN", + required=False, + ), + NestedField( + field_id=110, + name="null_value_counts", + field_type=MapType( + key_id=121, + key_type=IntegerType(), + value_id=122, + value_type=LongType(), + value_required=True, + ), + doc="Map of column id to null value count", + required=False, + ), + NestedField( + field_id=137, + name="nan_value_counts", + field_type=MapType( + key_id=138, + key_type=IntegerType(), + value_id=139, + value_type=LongType(), + value_required=True, + ), + doc="Map of column id to number of NaN values in the column", + required=False, + ), + NestedField( + field_id=125, + name="lower_bounds", + field_type=MapType( + key_id=126, + key_type=IntegerType(), + value_id=127, + value_type=BinaryType(), + value_required=True, + ), + doc="Map of column id to lower bound", + required=False, + ), + NestedField( + field_id=128, + name="upper_bounds", + field_type=MapType( + key_id=129, + key_type=IntegerType(), + value_id=130, + value_type=BinaryType(), + value_required=True, + ), + doc="Map of column id to upper bound", + required=False, + ), + NestedField( + field_id=131, + name="key_metadata", + field_type=BinaryType(), + doc="Encryption key metadata blob", + required=False, + ), + NestedField( + field_id=132, + name="split_offsets", + field_type=ListType( + element_id=133, + element_type=LongType(), + element_required=True, + ), + doc="Splittable offsets", + required=False, + ), + NestedField( + field_id=140, + name="sort_order_id", + field_type=IntegerType(), + doc="Sort order ID", + required=False, + ), + ), + required=True, + ), + schema_id=1, + identifier_field_ids=[], + ) + + +@pytest.fixture +def fsspec_fileio(request: pytest.FixtureRequest) -> FsspecFileIO: + properties = { 
+ "s3.endpoint": request.config.getoption("--s3.endpoint"), + "s3.access-key-id": request.config.getoption("--s3.access-key-id"), + "s3.secret-access-key": request.config.getoption("--s3.secret-access-key"), + } + return fsspec.FsspecFileIO(properties=properties) + + +@pytest.fixture +def fsspec_fileio_gcs(request: pytest.FixtureRequest) -> FsspecFileIO: + properties = { + GCS_ENDPOINT: request.config.getoption("--gcs.endpoint"), + GCS_TOKEN: request.config.getoption("--gcs.oauth2.token"), + GCS_PROJECT_ID: request.config.getoption("--gcs.project-id"), + } + return fsspec.FsspecFileIO(properties=properties) + + +@pytest.fixture +def pyarrow_fileio_gcs(request: pytest.FixtureRequest) -> "PyArrowFileIO": + from pyiceberg.io.pyarrow import PyArrowFileIO + + properties = { + GCS_ENDPOINT: request.config.getoption("--gcs.endpoint"), + GCS_TOKEN: request.config.getoption("--gcs.oauth2.token"), + GCS_PROJECT_ID: request.config.getoption("--gcs.project-id"), + GCS_TOKEN_EXPIRES_AT_MS: datetime_to_millis(datetime.now()) + 60 * 1000, + } + return PyArrowFileIO(properties=properties) + + +class MockAWSResponse(aiobotocore.awsrequest.AioAWSResponse): + """A mocked aws response implementation (for test use only). + + See https://github.com/aio-libs/aiobotocore/issues/755. + """ + + def __init__(self, response: botocore.awsrequest.AWSResponse) -> None: + self._moto_response = response + self.status_code = response.status_code + self.raw = MockHttpClientResponse(response) + + # adapt async methods to use moto's response + async def _content_prop(self) -> bytes: + return self._moto_response.content + + async def _text_prop(self) -> str: + return self._moto_response.text + + +class MockHttpClientResponse(aiohttp.client_reqrep.ClientResponse): + """A mocked http client response implementation (for test use only). + + See https://github.com/aio-libs/aiobotocore/issues/755. + """ + + def __init__(self, response: botocore.awsrequest.AWSResponse) -> None: + async def read(*_: Any) -> bytes: + # streaming/range requests. used by s3fs + return response.content + + self.content = MagicMock(aiohttp.StreamReader) + self.content.read = read + self.response = response + + @property + def raw_headers(self) -> aiohttp.typedefs.RawHeaders: + # Return the headers encoded the way that aiobotocore expects them + return {k.encode("utf-8"): str(v).encode("utf-8") for k, v in self.response.headers.items()}.items() + + +def patch_aiobotocore() -> None: + """Patch aiobotocore to work with moto. + + See https://github.com/aio-libs/aiobotocore/issues/755. + """ + + def factory(original: Callable) -> Callable: # type: ignore + def patched_convert_to_response_dict( # type: ignore + http_response: botocore.awsrequest.AWSResponse, operation_model: botocore.model.OperationModel + ): + return original(MockAWSResponse(http_response), operation_model) + + return patched_convert_to_response_dict + + aiobotocore.endpoint.convert_to_response_dict = factory(aiobotocore.endpoint.convert_to_response_dict) + + +@pytest.fixture(name="_patch_aiobotocore") +def fixture_aiobotocore(): # type: ignore + """Patch aiobotocore to work with moto. + + pending close of this issue: https://github.com/aio-libs/aiobotocore/issues/755. 
+ """ + stored_method = aiobotocore.endpoint.convert_to_response_dict + yield patch_aiobotocore() + # restore the changed method after the fixture is destroyed + aiobotocore.endpoint.convert_to_response_dict = stored_method + + +def aws_credentials() -> None: + os.environ["AWS_ACCESS_KEY_ID"] = "testing" + os.environ["AWS_SECRET_ACCESS_KEY"] = "testing" + os.environ["AWS_SECURITY_TOKEN"] = "testing" + os.environ["AWS_SESSION_TOKEN"] = "testing" + os.environ["AWS_DEFAULT_REGION"] = "us-east-1" + + +@pytest.fixture(name="_aws_credentials") +def fixture_aws_credentials() -> Generator[None, None, None]: + """Yield a mocked AWS Credentials for moto.""" + yield aws_credentials() # type: ignore + os.environ.pop("AWS_ACCESS_KEY_ID") + os.environ.pop("AWS_SECRET_ACCESS_KEY") + os.environ.pop("AWS_SECURITY_TOKEN") + os.environ.pop("AWS_SESSION_TOKEN") + os.environ.pop("AWS_DEFAULT_REGION") + + +@pytest.fixture(name="_s3") +def fixture_s3(_aws_credentials: None) -> Generator[boto3.client, None, None]: + """Yield a mocked S3 client.""" + with mock_s3(): + yield boto3.client("s3", region_name="us-east-1") + + +@pytest.fixture(name="_glue") +def fixture_glue(_aws_credentials: None) -> Generator[boto3.client, None, None]: + """Yield a mocked glue client.""" + with mock_glue(): + yield boto3.client("glue", region_name="us-east-1") + + +@pytest.fixture(name="_dynamodb") +def fixture_dynamodb(_aws_credentials: None) -> Generator[boto3.client, None, None]: + """Yield a mocked DynamoDB client.""" + with mock_dynamodb(): + yield boto3.client("dynamodb", region_name="us-east-1") + + +@pytest.fixture +def adlfs_fsspec_fileio(request: pytest.FixtureRequest) -> Generator[FsspecFileIO, None, None]: + from azure.storage.blob import BlobServiceClient + + azurite_url = request.config.getoption("--adlfs.endpoint") + azurite_account_name = request.config.getoption("--adlfs.account-name") + azurite_account_key = request.config.getoption("--adlfs.account-key") + azurite_connection_string = f"DefaultEndpointsProtocol=http;AccountName={azurite_account_name};AccountKey={azurite_account_key};BlobEndpoint={azurite_url}/{azurite_account_name};" + properties = { + "adlfs.connection-string": azurite_connection_string, + "adlfs.account-name": azurite_account_name, + } + + bbs = BlobServiceClient.from_connection_string(conn_str=azurite_connection_string) + bbs.create_container("tests") + yield fsspec.FsspecFileIO(properties=properties) + bbs.delete_container("tests") + + +@pytest.fixture(scope="session") +def empty_home_dir_path(tmp_path_factory: pytest.TempPathFactory) -> str: + home_path = str(tmp_path_factory.mktemp("home")) + return home_path + + +RANDOM_LENGTH = 20 +NUM_TABLES = 2 + + +@pytest.fixture() +def table_name() -> str: + prefix = "my_iceberg_table-" + random_tag = "".join(choice(string.ascii_letters) for _ in range(RANDOM_LENGTH)) + return (prefix + random_tag).lower() + + +@pytest.fixture() +def table_list(table_name: str) -> List[str]: + return [f"{table_name}_{idx}" for idx in range(NUM_TABLES)] + + +@pytest.fixture() +def database_name() -> str: + prefix = "my_iceberg_database-" + random_tag = "".join(choice(string.ascii_letters) for _ in range(RANDOM_LENGTH)) + return (prefix + random_tag).lower() + + +@pytest.fixture() +def database_list(database_name: str) -> List[str]: + return [f"{database_name}_{idx}" for idx in range(NUM_TABLES)] + + +BUCKET_NAME = "test_bucket" +TABLE_METADATA_LOCATION_REGEX = re.compile( + r"""s3://test_bucket/my_iceberg_database-[a-z]{20}.db/ + my_iceberg_table-[a-z]{20}/metadata/ + 
00000-[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}.metadata.json""", + re.X, +) + + +@pytest.fixture(name="_bucket_initialize") +def fixture_s3_bucket(_s3) -> None: # type: ignore + _s3.create_bucket(Bucket=BUCKET_NAME) + + +def get_bucket_name() -> str: + """Return the bucket name to test against, read from the environment variable AWS_TEST_BUCKET.""" + bucket_name = os.getenv("AWS_TEST_BUCKET") + if bucket_name is None: + raise ValueError("Please specify a bucket to run the test by setting environment variable AWS_TEST_BUCKET") + return bucket_name + + +def get_s3_path(bucket_name: str, database_name: Optional[str] = None, table_name: Optional[str] = None) -> str: + result_path = f"s3://{bucket_name}" + if database_name is not None: + result_path += f"/{database_name}.db" + + if table_name is not None: + result_path += f"/{table_name}" + return result_path + + +@pytest.fixture(name="s3", scope="module") +def fixture_s3_client() -> boto3.client: + yield boto3.client("s3") + + +def clean_up(test_catalog: Catalog) -> None: + """Clean up all databases and tables created during the integration test.""" + for database_tuple in test_catalog.list_namespaces(): + database_name = database_tuple[0] + if "my_iceberg_database-" in database_name: + for identifier in test_catalog.list_tables(database_name): + test_catalog.purge_table(identifier) + test_catalog.drop_namespace(database_name) + + +@pytest.fixture +def data_file(table_schema_simple: Schema, tmp_path: str) -> str: + import pyarrow as pa + from pyarrow import parquet as pq + + table = pa.table( + {"foo": ["a", "b", "c"], "bar": [1, 2, 3], "baz": [True, False, None]}, + metadata={"iceberg.schema": table_schema_simple.model_dump_json()}, + ) + + file_path = f"{tmp_path}/0000-data.parquet" + pq.write_table(table=table, where=file_path) + return file_path + + +@pytest.fixture +def example_task(data_file: str) -> FileScanTask: + return FileScanTask( + data_file=DataFile(file_path=data_file, file_format=FileFormat.PARQUET, file_size_in_bytes=1925), + ) + + +@pytest.fixture +def table(example_table_metadata_v2: Dict[str, Any]) -> Table: + table_metadata = TableMetadataV2(**example_table_metadata_v2) + return Table( + identifier=("database", "table"), + metadata=table_metadata, + metadata_location=f"{table_metadata.location}/uuid.metadata.json", + io=load_file_io(), + catalog=NoopCatalog("NoopCatalog"), + ) diff --git a/tests/expressions/test_evaluator.py b/tests/expressions/test_evaluator.py new file mode 100644 index 0000000000..7d97a6d2d2 --- /dev/null +++ b/tests/expressions/test_evaluator.py @@ -0,0 +1,927 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.
+# pylint:disable=redefined-outer-name +from typing import Any + +import pytest + +from pyiceberg.conversions import to_bytes +from pyiceberg.expressions import ( + And, + EqualTo, + GreaterThan, + GreaterThanOrEqual, + In, + IsNaN, + IsNull, + LessThan, + LessThanOrEqual, + Not, + NotEqualTo, + NotIn, + NotNaN, + NotNull, + NotStartsWith, + Or, + StartsWith, +) +from pyiceberg.expressions.visitors import _InclusiveMetricsEvaluator +from pyiceberg.manifest import DataFile, FileFormat +from pyiceberg.schema import Schema +from pyiceberg.types import ( + DoubleType, + FloatType, + IcebergType, + IntegerType, + NestedField, + PrimitiveType, + StringType, +) + +INT_MIN_VALUE = 30 +INT_MAX_VALUE = 79 + + +def _to_byte_buffer(field_type: IcebergType, val: Any) -> bytes: + if not isinstance(field_type, PrimitiveType): + raise ValueError(f"Expected a PrimitiveType, got: {type(field_type)}") + return to_bytes(field_type, val) + + +INT_MIN = _to_byte_buffer(IntegerType(), INT_MIN_VALUE) +INT_MAX = _to_byte_buffer(IntegerType(), INT_MAX_VALUE) + +STRING_MIN = _to_byte_buffer(StringType(), "a") +STRING_MAX = _to_byte_buffer(StringType(), "z") + + +@pytest.fixture +def schema_data_file() -> Schema: + return Schema( + NestedField(1, "id", IntegerType(), required=True), + NestedField(2, "no_stats", IntegerType(), required=False), + NestedField(3, "required", StringType(), required=True), + NestedField(4, "all_nulls", StringType(), required=False), + NestedField(5, "some_nulls", StringType(), required=False), + NestedField(6, "no_nulls", StringType(), required=False), + NestedField(7, "all_nans", DoubleType(), required=False), + NestedField(8, "some_nans", FloatType(), required=False), + NestedField(9, "no_nans", FloatType(), required=False), + NestedField(10, "all_nulls_double", DoubleType(), required=False), + NestedField(11, "all_nans_v1_stats", FloatType(), required=False), + NestedField(12, "nan_and_null_only", DoubleType(), required=False), + NestedField(13, "no_nan_stats", DoubleType(), required=False), + NestedField(14, "some_empty", StringType(), required=False), + ) + + +@pytest.fixture +def data_file() -> DataFile: + return DataFile( + file_path="file_1.parquet", + file_format=FileFormat.PARQUET, + partition={}, + record_count=50, + file_size_in_bytes=3, + value_counts={ + 4: 50, + 5: 50, + 6: 50, + 7: 50, + 8: 50, + 9: 50, + 10: 50, + 11: 50, + 12: 50, + 13: 50, + 14: 50, + }, + null_value_counts={4: 50, 5: 10, 6: 0, 10: 50, 11: 0, 12: 1, 14: 8}, + nan_value_counts={ + 7: 50, + 8: 10, + 9: 0, + }, + lower_bounds={ + 1: to_bytes(IntegerType(), INT_MIN_VALUE), + 11: to_bytes(FloatType(), float("nan")), + 12: to_bytes(DoubleType(), float("nan")), + 14: to_bytes(StringType(), ""), + }, + upper_bounds={ + 1: to_bytes(IntegerType(), INT_MAX_VALUE), + 11: to_bytes(FloatType(), float("nan")), + 12: to_bytes(DoubleType(), float("nan")), + 14: to_bytes(StringType(), "房东整租霍营小区二层两居室"), + }, + ) + + +@pytest.fixture +def data_file_2() -> DataFile: + return DataFile( + file_path="file_2.parquet", + file_format=FileFormat.PARQUET, + partition={}, + record_count=50, + file_size_in_bytes=3, + value_counts={3: 20}, + null_value_counts={3: 2}, + nan_value_counts=None, + lower_bounds={3: to_bytes(StringType(), "aa")}, + upper_bounds={3: to_bytes(StringType(), "dC")}, + ) + + +@pytest.fixture +def data_file_3() -> DataFile: + return DataFile( + file_path="file_3.parquet", + file_format=FileFormat.PARQUET, + partition={}, + record_count=50, + file_size_in_bytes=3, + value_counts={3: 20}, + null_value_counts={3: 2}, 
+ nan_value_counts=None, + lower_bounds={3: to_bytes(StringType(), "1str1")}, + upper_bounds={3: to_bytes(StringType(), "3str3")}, + ) + + +@pytest.fixture +def data_file_4() -> DataFile: + return DataFile( + file_path="file_4.parquet", + file_format=FileFormat.PARQUET, + partition={}, + record_count=50, + file_size_in_bytes=3, + value_counts={3: 20}, + null_value_counts={3: 2}, + nan_value_counts=None, + lower_bounds={3: to_bytes(StringType(), "abc")}, + upper_bounds={3: to_bytes(StringType(), "イロハニホヘト")}, + ) + + +def test_all_null(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotNull("all_nulls")).eval(data_file) + assert not should_read, "Should skip: no non-null value in all null column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, LessThan("all_nulls", "a")).eval(data_file) + assert not should_read, "Should skip: lessThan on all null column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, LessThanOrEqual("all_nulls", "a")).eval(data_file) + assert not should_read, "Should skip: lessThanOrEqual on all null column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, GreaterThan("all_nulls", "a")).eval(data_file) + assert not should_read, "Should skip: greaterThan on all null column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, GreaterThanOrEqual("all_nulls", "a")).eval(data_file) + assert not should_read, "Should skip: greaterThanOrEqual on all null column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, EqualTo("all_nulls", "a")).eval(data_file) + assert not should_read, "Should skip: equal on all null column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotNull("some_nulls")).eval(data_file) + assert should_read, "Should read: column with some nulls contains a non-null value" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotNull("no_nulls")).eval(data_file) + assert should_read, "Should read: non-null column contains a non-null value" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("all_nulls", "asad")).eval(data_file) + assert not should_read, "Should skip: startsWith on all null column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("all_nulls", "asad")).eval(data_file) + assert should_read, "Should read: notStartsWith on all null column" + + +def test_no_nulls(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, IsNull("all_nulls")).eval(data_file) + assert should_read, "Should read: at least one null value in all null column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, IsNull("some_nulls")).eval(data_file) + assert should_read, "Should read: column with some nulls contains a null value" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, IsNull("no_nulls")).eval(data_file) + assert not should_read, "Should skip: non-null column contains no null values" + + +def test_is_nan(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, IsNaN("all_nans")).eval(data_file) + assert should_read, "Should read: at least one nan value in all nan column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, IsNaN("some_nans")).eval(data_file) + assert should_read, "Should read: at least one nan value in some nan column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, 
IsNaN("no_nans")).eval(data_file) + assert not should_read, "Should skip: no-nans column contains no nan values" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, IsNaN("all_nulls_double")).eval(data_file) + assert not should_read, "Should skip: all-null column doesn't contain nan value" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, IsNaN("no_nan_stats")).eval(data_file) + assert should_read, "Should read: no guarantee on if contains nan value without nan stats" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, IsNaN("all_nans_v1_stats")).eval(data_file) + assert should_read, "Should read: at least one nan value in all nan column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, IsNaN("nan_and_null_only")).eval(data_file) + assert should_read, "Should read: at least one nan value in nan and nulls only column" + + +def test_not_nan(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotNaN("all_nans")).eval(data_file) + assert not should_read, "Should skip: column with all nans will not contain non-nan" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotNaN("some_nans")).eval(data_file) + assert should_read, "Should read: at least one non-nan value in some nan column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotNaN("no_nans")).eval(data_file) + assert should_read, "Should read: at least one non-nan value in no nan column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotNaN("all_nulls_double")).eval(data_file) + assert should_read, "Should read: at least one non-nan value in all null column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotNaN("no_nan_stats")).eval(data_file) + assert should_read, "Should read: no guarantee on if contains nan value without nan stats" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotNaN("all_nans_v1_stats")).eval(data_file) + assert should_read, "Should read: no guarantee on if contains nan value without nan stats" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotNaN("nan_and_null_only")).eval(data_file) + assert should_read, "Should read: at least one null value in nan and nulls only column" + + +def test_required_column(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotNull("required")).eval(data_file) + assert should_read, "Should read: required columns are always non-null" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, IsNull("required")).eval(data_file) + assert not should_read, "Should skip: required columns are always non-null" + + +def test_missing_column(schema_data_file: Schema, data_file: DataFile) -> None: + with pytest.raises(ValueError) as exc_info: + _ = _InclusiveMetricsEvaluator(schema_data_file, LessThan("missing", 22)).eval(data_file) + + assert str(exc_info.value) == "Could not find field with name missing, case_sensitive=True" + + +def test_missing_stats() -> None: + no_stats_schema = Schema( + NestedField(2, "no_stats", DoubleType(), required=False), + ) + + no_stats_file = DataFile( + file_path="file_1.parquet", + file_format=FileFormat.PARQUET, + partition={}, + record_count=50, + value_counts=None, + null_value_counts=None, + nan_value_counts=None, + lower_bounds=None, + upper_bounds=None, + ) + + expressions = [ + LessThan("no_stats", 5), + LessThanOrEqual("no_stats", 30), + EqualTo("no_stats", 70), + GreaterThan("no_stats", 
78), + GreaterThanOrEqual("no_stats", 90), + NotEqualTo("no_stats", 101), + IsNull("no_stats"), + NotNull("no_stats"), + IsNaN("no_stats"), + NotNaN("no_stats"), + ] + + for expression in expressions: + should_read = _InclusiveMetricsEvaluator(no_stats_schema, expression).eval(no_stats_file) + assert should_read, f"Should read when stats are missing for: {expression}" + + +def test_zero_record_file_stats(schema_data_file: Schema) -> None: + zero_record_data_file = DataFile(file_path="file_1.parquet", file_format=FileFormat.PARQUET, partition={}, record_count=0) + + expressions = [ + LessThan("no_stats", 5), + LessThanOrEqual("no_stats", 30), + EqualTo("no_stats", 70), + GreaterThan("no_stats", 78), + GreaterThanOrEqual("no_stats", 90), + NotEqualTo("no_stats", 101), + IsNull("no_stats"), + NotNull("no_stats"), + IsNaN("no_stats"), + NotNaN("no_stats"), + ] + + for expression in expressions: + should_read = _InclusiveMetricsEvaluator(schema_data_file, expression).eval(zero_record_data_file) + assert not should_read, f"Should skip a datafile without records: {expression}" + + +def test_not(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(LessThan("id", INT_MIN_VALUE - 25))).eval(data_file) + assert should_read, "Should read: not(false)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(GreaterThan("id", INT_MIN_VALUE - 25))).eval(data_file) + assert not should_read, "Should skip: not(true)" + + +def test_and(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator( + schema_data_file, And(LessThan("id", INT_MIN_VALUE - 25), GreaterThanOrEqual("id", INT_MIN_VALUE - 30)) + ).eval(data_file) + assert not should_read, "Should skip: and(false, true)" + + should_read = _InclusiveMetricsEvaluator( + schema_data_file, And(LessThan("id", INT_MIN_VALUE - 25), GreaterThanOrEqual("id", INT_MIN_VALUE + 1)) + ).eval(data_file) + assert not should_read, "Should skip: and(false, false)" + + should_read = _InclusiveMetricsEvaluator( + schema_data_file, And(GreaterThan("id", INT_MIN_VALUE - 25), LessThanOrEqual("id", INT_MIN_VALUE)) + ).eval(data_file) + assert should_read, "Should read: and(true, true)" + + +def test_or(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator( + schema_data_file, Or(LessThan("id", INT_MIN_VALUE - 25), GreaterThanOrEqual("id", INT_MAX_VALUE + 1)) + ).eval(data_file) + assert not should_read, "Should skip: or(false, false)" + + should_read = _InclusiveMetricsEvaluator( + schema_data_file, Or(LessThan("id", INT_MIN_VALUE - 25), GreaterThanOrEqual("id", INT_MAX_VALUE - 19)) + ).eval(data_file) + assert should_read, "Should read: or(false, true)" + + +def test_integer_lt(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, LessThan("id", INT_MIN_VALUE - 25)).eval(data_file) + assert not should_read, "Should not read: id range below lower bound (5 < 30)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, LessThan("id", INT_MIN_VALUE)).eval(data_file) + assert not should_read, "Should not read: id range below lower bound (30 is not < 30)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, LessThan("id", INT_MIN_VALUE + 1)).eval(data_file) + assert should_read, "Should read: one possible id" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, LessThan("id", INT_MAX_VALUE)).eval(data_file) + assert should_read, 
"Should read: may possible ids" + + +def test_integer_lt_eq(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, LessThanOrEqual("id", INT_MIN_VALUE - 25)).eval(data_file) + assert not should_read, "Should not read: id range below lower bound (5 < 30)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, LessThanOrEqual("id", INT_MIN_VALUE - 1)).eval(data_file) + assert not should_read, "Should not read: id range below lower bound (30 is not < 30)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, LessThanOrEqual("id", INT_MIN_VALUE)).eval(data_file) + assert should_read, "Should read: one possible id" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, LessThanOrEqual("id", INT_MAX_VALUE)).eval(data_file) + assert should_read, "Should read: may possible ids" + + +def test_integer_gt(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, GreaterThan("id", INT_MAX_VALUE + 6)).eval(data_file) + assert not should_read, "Should not read: id range above upper bound (85 < 79)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, GreaterThan("id", INT_MAX_VALUE)).eval(data_file) + assert not should_read, "Should not read: id range above upper bound (79 is not > 79)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, GreaterThan("id", INT_MIN_VALUE - 1)).eval(data_file) + assert should_read, "Should read: one possible id" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, GreaterThan("id", INT_MAX_VALUE - 4)).eval(data_file) + assert should_read, "Should read: may possible ids" + + +def test_integer_gt_eq(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, GreaterThanOrEqual("id", INT_MAX_VALUE + 6)).eval(data_file) + assert not should_read, "Should not read: id range above upper bound (85 < 79)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, GreaterThanOrEqual("id", INT_MAX_VALUE + 1)).eval(data_file) + assert not should_read, "Should not read: id range above upper bound (80 > 79)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, GreaterThanOrEqual("id", INT_MAX_VALUE)).eval(data_file) + assert should_read, "Should read: one possible id" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, GreaterThanOrEqual("id", INT_MAX_VALUE - 4)).eval(data_file) + assert should_read, "Should read: may possible ids" + + +def test_integer_eq(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, EqualTo("id", INT_MIN_VALUE - 25)).eval(data_file) + assert not should_read, "Should not read: id below lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, EqualTo("id", INT_MIN_VALUE - 1)).eval(data_file) + assert not should_read, "Should not read: id below lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, EqualTo("id", INT_MIN_VALUE)).eval(data_file) + assert should_read, "Should read: id equal to lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, EqualTo("id", INT_MAX_VALUE - 4)).eval(data_file) + assert should_read, "Should read: id between lower and upper bounds" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, EqualTo("id", INT_MAX_VALUE)).eval(data_file) + assert should_read, "Should read: id equal to upper bound" + + should_read = 
_InclusiveMetricsEvaluator(schema_data_file, EqualTo("id", INT_MAX_VALUE + 1)).eval(data_file) + assert not should_read, "Should not read: id above upper bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, EqualTo("id", INT_MAX_VALUE + 6)).eval(data_file) + assert not should_read, "Should not read: id above upper bound" + + +def test_integer_not_eq(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotEqualTo("id", INT_MIN_VALUE - 25)).eval(data_file) + assert should_read, "Should read: id below lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotEqualTo("id", INT_MIN_VALUE - 1)).eval(data_file) + assert should_read, "Should read: id below lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotEqualTo("id", INT_MIN_VALUE)).eval(data_file) + assert should_read, "Should read: id equal to lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotEqualTo("id", INT_MAX_VALUE - 4)).eval(data_file) + assert should_read, "Should read: id between lower and upper bounds" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotEqualTo("id", INT_MAX_VALUE)).eval(data_file) + assert should_read, "Should read: id equal to upper bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotEqualTo("id", INT_MAX_VALUE + 1)).eval(data_file) + assert should_read, "Should read: id above upper bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotEqualTo("id", INT_MAX_VALUE + 6)).eval(data_file) + assert should_read, "Should read: id above upper bound" + + +def test_integer_not_eq_rewritten(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("id", INT_MIN_VALUE - 25))).eval(data_file) + assert should_read, "Should read: id below lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("id", INT_MIN_VALUE - 1))).eval(data_file) + assert should_read, "Should read: id below lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("id", INT_MIN_VALUE))).eval(data_file) + assert should_read, "Should read: id equal to lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("id", INT_MAX_VALUE - 4))).eval(data_file) + assert should_read, "Should read: id between lower and upper bounds" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("id", INT_MAX_VALUE))).eval(data_file) + assert should_read, "Should read: id equal to upper bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("id", INT_MAX_VALUE + 1))).eval(data_file) + assert should_read, "Should read: id above upper bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("id", INT_MAX_VALUE + 6))).eval(data_file) + assert should_read, "Should read: id above upper bound" + + +def test_integer_case_insensitive_not_eq_rewritten(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("ID", INT_MIN_VALUE - 25)), case_sensitive=False).eval( + data_file + ) + assert should_read, "Should read: id below lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("ID", INT_MIN_VALUE - 1)), case_sensitive=False).eval( + data_file + ) + assert should_read, "Should read: id below lower bound" + + should_read = 
_InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("ID", INT_MIN_VALUE)), case_sensitive=False).eval( + data_file + ) + assert should_read, "Should read: id equal to lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("ID", INT_MAX_VALUE - 4)), case_sensitive=False).eval( + data_file + ) + assert should_read, "Should read: id between lower and upper bounds" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("ID", INT_MAX_VALUE)), case_sensitive=False).eval( + data_file + ) + assert should_read, "Should read: id equal to upper bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("ID", INT_MAX_VALUE + 1)), case_sensitive=False).eval( + data_file + ) + assert should_read, "Should read: id above upper bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, Not(EqualTo("ID", INT_MAX_VALUE + 6)), case_sensitive=False).eval( + data_file + ) + assert should_read, "Should read: id above upper bound" + + +def test_missing_column_case_sensitive(schema_data_file: Schema, data_file: DataFile) -> None: + with pytest.raises(ValueError) as exc_info: + _ = _InclusiveMetricsEvaluator(schema_data_file, LessThan("ID", 22), case_sensitive=True).eval(data_file) + + assert str(exc_info.value) == "Could not find field with name ID, case_sensitive=True" + + +def test_integer_in(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, In("id", {INT_MIN_VALUE - 25, INT_MIN_VALUE - 24})).eval(data_file) + assert not should_read, "Should not read: id below lower bound (5 < 30, 6 < 30)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, In("id", {INT_MIN_VALUE - 2, INT_MIN_VALUE - 1})).eval(data_file) + assert not should_read, "Should not read: id below lower bound (28 < 30, 29 < 30)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, In("id", {INT_MIN_VALUE - 1, INT_MIN_VALUE})).eval(data_file) + assert should_read, "Should read: id equal to lower bound (30 == 30)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, In("id", {INT_MAX_VALUE - 4, INT_MAX_VALUE - 3})).eval(data_file) + assert should_read, "Should read: id between lower and upper bounds (30 < 75 < 79, 30 < 76 < 79)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, In("id", {INT_MAX_VALUE, INT_MAX_VALUE + 1})).eval(data_file) + assert should_read, "Should read: id equal to upper bound (79 == 79)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, In("id", {INT_MAX_VALUE + 1, INT_MAX_VALUE + 2})).eval(data_file) + assert not should_read, "Should not read: id above upper bound (80 > 79, 81 > 79)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, In("id", {INT_MAX_VALUE + 6, INT_MAX_VALUE + 7})).eval(data_file) + assert not should_read, "Should not read: id above upper bound (85 > 79, 86 > 79)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, In("all_nulls", {"abc", "def"})).eval(data_file) + assert not should_read, "Should skip: in on all nulls column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, In("some_nulls", {"abc", "def"})).eval(data_file) + assert should_read, "Should read: in on some nulls column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, In("no_nulls", {"abc", "def"})).eval(data_file) + assert should_read, "Should read: in on no nulls column" + + ids = list(range(400)) + should_read = _InclusiveMetricsEvaluator(schema_data_file, In("id", ids)).eval(data_file) 
+ assert should_read, "Should read: large in expression" + + +def test_integer_not_in(schema_data_file: Schema, data_file: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotIn("id", {INT_MIN_VALUE - 25, INT_MIN_VALUE - 24})).eval( + data_file + ) + assert should_read, "Should read: id below lower bound (5 < 30, 6 < 30)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotIn("id", {INT_MIN_VALUE - 2, INT_MIN_VALUE - 1})).eval( + data_file + ) + assert should_read, "Should read: id below lower bound (28 < 30, 29 < 30)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotIn("id", {INT_MIN_VALUE - 1, INT_MIN_VALUE})).eval(data_file) + assert should_read, "Should read: id equal to lower bound (30 == 30)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotIn("id", {INT_MAX_VALUE - 4, INT_MAX_VALUE - 3})).eval( + data_file + ) + assert should_read, "Should read: id between lower and upper bounds (30 < 75 < 79, 30 < 76 < 79)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotIn("id", {INT_MAX_VALUE, INT_MAX_VALUE + 1})).eval(data_file) + assert should_read, "Should read: id equal to upper bound (79 == 79)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotIn("id", {INT_MAX_VALUE + 1, INT_MAX_VALUE + 2})).eval( + data_file + ) + assert should_read, "Should read: id above upper bound (80 > 79, 81 > 79)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotIn("id", {INT_MAX_VALUE + 6, INT_MAX_VALUE + 7})).eval( + data_file + ) + assert should_read, "Should read: id above upper bound (85 > 79, 86 > 79)" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotIn("all_nulls", {"abc", "def"})).eval(data_file) + assert should_read, "Should read: notIn on all nulls column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotIn("some_nulls", {"abc", "def"})).eval(data_file) + assert should_read, "Should read: in on some nulls column" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotIn("no_nulls", {"abc", "def"})).eval(data_file) + assert should_read, "Should read: in on no nulls column" + + +@pytest.fixture +def schema_data_file_nan() -> Schema: + return Schema( + NestedField(1, "all_nan", DoubleType(), required=True), + NestedField(2, "max_nan", DoubleType(), required=True), + NestedField(3, "min_max_nan", FloatType(), required=False), + NestedField(4, "all_nan_null_bounds", DoubleType(), required=True), + NestedField(5, "some_nan_correct_bounds", FloatType(), required=False), + ) + + +@pytest.fixture +def data_file_nan() -> DataFile: + return DataFile( + file_path="file.avro", + file_format=FileFormat.PARQUET, + partition={}, + record_count=50, + file_size_in_bytes=3, + column_sizes={ + 1: 10, + 2: 10, + 3: 10, + 4: 10, + 5: 10, + }, + value_counts={ + 1: 10, + 2: 10, + 3: 10, + 4: 10, + 5: 10, + }, + null_value_counts={ + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + }, + nan_value_counts={1: 10, 4: 10, 5: 5}, + lower_bounds={ + 1: to_bytes(DoubleType(), float("nan")), + 2: to_bytes(DoubleType(), 7), + 3: to_bytes(FloatType(), float("nan")), + 5: to_bytes(FloatType(), 7), + }, + upper_bounds={ + 1: to_bytes(DoubleType(), float("nan")), + 2: to_bytes(DoubleType(), float("nan")), + 3: to_bytes(FloatType(), float("nan")), + 5: to_bytes(FloatType(), 22), + }, + ) + + +def test_inclusive_metrics_evaluator_less_than_and_less_than_equal(schema_data_file_nan: Schema, data_file_nan: DataFile) -> None: + for operator in [LessThan, LessThanOrEqual]: + 
should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("all_nan", 1)).eval(data_file_nan) + assert not should_read, "Should not match: all nan column doesn't contain number" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("max_nan", 1)).eval(data_file_nan) + assert not should_read, "Should not match: 1 is smaller than lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("max_nan", 10)).eval(data_file_nan) + assert should_read, "Should match: 10 is larger than lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("min_max_nan", 1)).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("all_nan_null_bounds", 1)).eval(data_file_nan) + assert not should_read, "Should not match: all nan column doesn't contain number" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("some_nan_correct_bounds", 1)).eval(data_file_nan) + assert not should_read, "Should not match: 1 is smaller than lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("some_nan_correct_bounds", 10)).eval( + data_file_nan + ) + assert should_read, "Should match: 10 larger than lower bound" + + +def test_inclusive_metrics_evaluator_greater_than_and_greater_than_equal( + schema_data_file_nan: Schema, data_file_nan: DataFile +) -> None: + for operator in [GreaterThan, GreaterThanOrEqual]: + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("all_nan", 1)).eval(data_file_nan) + assert not should_read, "Should not match: all nan column doesn't contain number" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("max_nan", 1)).eval(data_file_nan) + assert should_read, "Should match: upper bound is larger than 1" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("max_nan", 10)).eval(data_file_nan) + assert should_read, "Should match: upper bound is larger than 10" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("min_max_nan", 1)).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("all_nan_null_bounds", 1)).eval(data_file_nan) + assert not should_read, "Should not match: all nan column doesn't contain number" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("some_nan_correct_bounds", 1)).eval(data_file_nan) + assert should_read, "Should match: 1 is smaller than upper bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("some_nan_correct_bounds", 10)).eval( + data_file_nan + ) + assert should_read, "Should match: 10 is smaller than upper bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, operator("all_nan", 30)).eval(data_file_nan) + assert not should_read, "Should not match: 30 is greater than upper bound" + + +def test_inclusive_metrics_evaluator_equals(schema_data_file_nan: Schema, data_file_nan: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, EqualTo("all_nan", 1)).eval(data_file_nan) + assert not should_read, "Should not match: all nan column doesn't contain number" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, EqualTo("max_nan", 1)).eval(data_file_nan) + assert not should_read, "Should not match: 1 is smaller than lower bound" + + should_read = 
_InclusiveMetricsEvaluator(schema_data_file_nan, EqualTo("max_nan", 10)).eval(data_file_nan) + assert should_read, "Should match: 10 is within bounds" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, EqualTo("min_max_nan", 1)).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, EqualTo("all_nan_null_bounds", 1)).eval(data_file_nan) + assert not should_read, "Should not match: all nan column doesn't contain number" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, EqualTo("some_nan_correct_bounds", 1)).eval(data_file_nan) + assert not should_read, "Should not match: 1 is smaller than lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, EqualTo("some_nan_correct_bounds", 10)).eval(data_file_nan) + assert should_read, "Should match: 10 is within bounds" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, EqualTo("all_nan", 30)).eval(data_file_nan) + assert not should_read, "Should not match: 30 is greater than upper bound" + + +def test_inclusive_metrics_evaluator_not_equals(schema_data_file_nan: Schema, data_file_nan: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotEqualTo("all_nan", 1)).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotEqualTo("max_nan", 10)).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotEqualTo("max_nan", 10)).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotEqualTo("min_max_nan", 1)).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotEqualTo("all_nan_null_bounds", 1)).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotEqualTo("some_nan_correct_bounds", 1)).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotEqualTo("some_nan_correct_bounds", 10)).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotEqualTo("some_nan_correct_bounds", 30)).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + +def test_inclusive_metrics_evaluator_in(schema_data_file_nan: Schema, data_file_nan: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, In("all_nan", (1, 10, 30))).eval(data_file_nan) + assert not should_read, "Should not match: all nan column doesn't contain number" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, In("max_nan", (1, 10, 30))).eval(data_file_nan) + assert should_read, "Should match: 10 and 30 are greater than lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, In("min_max_nan", (1, 10, 30))).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, In("all_nan_null_bounds", (1, 10, 30))).eval(data_file_nan) + assert not should_read, "Should not match: all nan column doesn't contain number" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, 
In("some_nan_correct_bounds", (1, 10, 30))).eval(data_file_nan) + assert should_read, "Should match: 10 within bounds" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, In("some_nan_correct_bounds", (1, 30))).eval(data_file_nan) + assert not should_read, "Should not match: 1 and 30 not within bounds" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, In("some_nan_correct_bounds", (5, 7))).eval(data_file_nan) + assert should_read, "Should match: overlap with lower bound" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, In("some_nan_correct_bounds", (22, 25))).eval(data_file_nan) + assert should_read, "Should match: overlap with upper bounds" + + +def test_inclusive_metrics_evaluator_not_in(schema_data_file_nan: Schema, data_file_nan: DataFile) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotIn("all_nan", (1, 10, 30))).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotIn("max_nan", (1, 10, 30))).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotIn("max_nan", (1, 10, 30))).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotIn("min_max_nan", (1, 10, 30))).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotIn("all_nan_null_bounds", (1, 10, 30))).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotIn("some_nan_correct_bounds", (1, 30))).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + should_read = _InclusiveMetricsEvaluator(schema_data_file_nan, NotIn("some_nan_correct_bounds", (1, 30))).eval(data_file_nan) + assert should_read, "Should match: no visibility" + + +def test_string_starts_with( + schema_data_file: Schema, data_file: DataFile, data_file_2: DataFile, data_file_3: DataFile, data_file_4: DataFile +) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("required", "a")).eval(data_file) + assert should_read, "Should read: no stats" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("required", "a")).eval(data_file_2) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("required", "aa")).eval(data_file_2) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("required", "aaa")).eval(data_file_2) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("required", "1s")).eval(data_file_3) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("required", "1str1x")).eval(data_file_3) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("required", "ff")).eval(data_file_4) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("required", "aB")).eval(data_file_2) + assert not should_read, "Should not read: range doesn't match" + + should_read = 
_InclusiveMetricsEvaluator(schema_data_file, StartsWith("required", "dWX")).eval(data_file_2) + assert not should_read, "Should not read: range doesn't match" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("required", "5")).eval(data_file_3) + assert not should_read, "Should not read: range doesn't match" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("required", "3str3x")).eval(data_file_3) + assert not should_read, "Should not read: range doesn't match" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("some_empty", "房东整租霍")).eval(data_file) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("all_nulls", "")).eval(data_file) + assert not should_read, "Should not read: range doesn't match" + + # above_max = UnicodeUtil.truncateStringMax(Literal.of("イロハニホヘト"), 4).value().toString(); + + # should_read = _InclusiveMetricsEvaluator(schema_data_file, StartsWith("required", above_max)).eval(data_file_4) + # assert not should_read, "Should not read: range doesn't match" + + +def test_string_not_starts_with( + schema_data_file: Schema, data_file: DataFile, data_file_2: DataFile, data_file_3: DataFile, data_file_4: DataFile +) -> None: + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("required", "a")).eval(data_file) + assert should_read, "Should read: no stats" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("required", "a")).eval(data_file_2) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("required", "aa")).eval(data_file_2) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("required", "aaa")).eval(data_file_2) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("required", "1s")).eval(data_file_3) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("required", "1str1x")).eval(data_file_3) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("required", "ff")).eval(data_file_4) + assert should_read, "Should read: range matches" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("required", "aB")).eval(data_file_2) + assert should_read, "Should not read: range doesn't match" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("required", "dWX")).eval(data_file_2) + assert should_read, "Should not read: range doesn't match" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("required", "5")).eval(data_file_3) + assert should_read, "Should not read: range doesn't match" + + should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("required", "3str3x")).eval(data_file_3) + assert should_read, "Should not read: range doesn't match" + + # above_max = UnicodeUtil.truncateStringMax(Literal.of("イロハニホヘト"), 4).value().toString(); + + # should_read = _InclusiveMetricsEvaluator(schema_data_file, NotStartsWith("required", above_max)).eval(data_file_4) + # assert should_read, "Should not read: range doesn't match" diff --git a/tests/expressions/test_expressions.py b/tests/expressions/test_expressions.py new file mode 100644 index 
0000000000..bd3a14165e --- /dev/null +++ b/tests/expressions/test_expressions.py @@ -0,0 +1,1161 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint:disable=redefined-outer-name,eval-used + +import pickle +import uuid +from decimal import Decimal +from typing import Any + +import pytest +from typing_extensions import assert_type + +from pyiceberg.expressions import ( + AlwaysFalse, + AlwaysTrue, + And, + BooleanExpression, + BoundEqualTo, + BoundGreaterThan, + BoundGreaterThanOrEqual, + BoundIn, + BoundIsNaN, + BoundIsNull, + BoundLessThan, + BoundLessThanOrEqual, + BoundNotEqualTo, + BoundNotIn, + BoundNotNaN, + BoundNotNull, + BoundReference, + EqualTo, + GreaterThan, + GreaterThanOrEqual, + In, + IsNaN, + IsNull, + LessThan, + LessThanOrEqual, + Not, + NotEqualTo, + NotIn, + NotNaN, + NotNull, + Or, + Reference, + UnboundPredicate, +) +from pyiceberg.expressions.literals import Literal, literal +from pyiceberg.expressions.visitors import _from_byte_buffer +from pyiceberg.schema import Accessor, Schema +from pyiceberg.typedef import Record +from pyiceberg.types import ( + BinaryType, + BooleanType, + DecimalType, + DoubleType, + FloatType, + IntegerType, + ListType, + LongType, + NestedField, + StringType, + StructType, + UUIDType, +) +from pyiceberg.utils.singleton import Singleton + + +class ExpressionA(BooleanExpression, Singleton): + def __invert__(self) -> BooleanExpression: + return ExpressionB() + + def __repr__(self) -> str: + return "ExpressionA()" + + def __str__(self) -> str: + return "testexpra" + + +class ExpressionB(BooleanExpression, Singleton): + def __invert__(self) -> BooleanExpression: + return ExpressionA() + + def __repr__(self) -> str: + return "ExpressionB()" + + def __str__(self) -> str: + return "testexprb" + + +def test_isnull_inverse() -> None: + assert ~IsNull(Reference("a")) == NotNull(Reference("a")) + + +def test_isnull_bind() -> None: + schema = Schema(NestedField(2, "a", IntegerType()), schema_id=1) + bound = BoundIsNull(BoundReference(schema.find_field(2), schema.accessor_for_field(2))) + assert IsNull(Reference("a")).bind(schema) == bound + + +def test_invert_is_null_bind() -> None: + schema = Schema(NestedField(2, "a", IntegerType(), required=False), schema_id=1) + assert ~IsNull(Reference("a")).bind(schema) == NotNull(Reference("a")).bind(schema) + + +def test_invert_not_null_bind() -> None: + schema = Schema(NestedField(2, "a", IntegerType(), required=False), schema_id=1) + assert ~NotNull(Reference("a")).bind(schema) == IsNull(Reference("a")).bind(schema) + + +def test_invert_is_nan_bind() -> None: + schema = Schema(NestedField(2, "a", DoubleType(), required=False), schema_id=1) + assert ~IsNaN(Reference("a")).bind(schema) == NotNaN(Reference("a")).bind(schema) + + +def test_invert_not_nan_bind() -> None: + 
schema = Schema(NestedField(2, "a", DoubleType(), required=False), schema_id=1) + assert ~NotNaN(Reference("a")).bind(schema) == IsNaN(Reference("a")).bind(schema) + + +def test_bind_expr_does_not_exists() -> None: + schema = Schema(NestedField(2, "a", IntegerType()), schema_id=1) + with pytest.raises(ValueError) as exc_info: + IsNull(Reference("b")).bind(schema) + + assert str(exc_info.value) == "Could not find field with name b, case_sensitive=True" + + +def test_bind_does_not_exists() -> None: + schema = Schema(NestedField(2, "a", IntegerType()), schema_id=1) + with pytest.raises(ValueError) as exc_info: + Reference("b").bind(schema) + + assert str(exc_info.value) == "Could not find field with name b, case_sensitive=True" + + +def test_isnull_bind_required() -> None: + schema = Schema(NestedField(2, "a", IntegerType(), required=True), schema_id=1) + assert IsNull(Reference("a")).bind(schema) == AlwaysFalse() + + +def test_notnull_inverse() -> None: + assert ~NotNull(Reference("a")) == IsNull(Reference("a")) + + +def test_notnull_bind() -> None: + schema = Schema(NestedField(2, "a", IntegerType()), schema_id=1) + bound = BoundNotNull(BoundReference(schema.find_field(2), schema.accessor_for_field(2))) + assert NotNull(Reference("a")).bind(schema) == bound + + +def test_notnull_bind_required() -> None: + schema = Schema(NestedField(2, "a", IntegerType(), required=True), schema_id=1) + assert NotNull(Reference("a")).bind(schema) == AlwaysTrue() + + +def test_isnan_inverse() -> None: + assert ~IsNaN(Reference("f")) == NotNaN(Reference("f")) + + +def test_isnan_bind_float() -> None: + schema = Schema(NestedField(2, "f", FloatType()), schema_id=1) + bound = BoundIsNaN(BoundReference(schema.find_field(2), schema.accessor_for_field(2))) + assert IsNaN(Reference("f")).bind(schema) == bound + + +def test_isnan_bind_double() -> None: + schema = Schema(NestedField(2, "d", DoubleType()), schema_id=1) + bound = BoundIsNaN(BoundReference(schema.find_field(2), schema.accessor_for_field(2))) + assert IsNaN(Reference("d")).bind(schema) == bound + + +def test_isnan_bind_nonfloat() -> None: + schema = Schema(NestedField(2, "i", IntegerType()), schema_id=1) + assert IsNaN(Reference("i")).bind(schema) == AlwaysFalse() + + +def test_notnan_inverse() -> None: + assert ~NotNaN(Reference("f")) == IsNaN(Reference("f")) + + +def test_notnan_bind_float() -> None: + schema = Schema(NestedField(2, "f", FloatType()), schema_id=1) + bound = BoundNotNaN(BoundReference(schema.find_field(2), schema.accessor_for_field(2))) + assert NotNaN(Reference("f")).bind(schema) == bound + + +def test_notnan_bind_double() -> None: + schema = Schema(NestedField(2, "d", DoubleType()), schema_id=1) + bound = BoundNotNaN(BoundReference(schema.find_field(2), schema.accessor_for_field(2))) + assert NotNaN(Reference("d")).bind(schema) == bound + + +def test_notnan_bind_nonfloat() -> None: + schema = Schema(NestedField(2, "i", IntegerType()), schema_id=1) + assert NotNaN(Reference("i")).bind(schema) == AlwaysTrue() + + +def test_ref_binding_case_sensitive(table_schema_simple: Schema) -> None: + ref = Reference("foo") + bound = BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)) + assert ref.bind(table_schema_simple, case_sensitive=True) == bound + + +def test_ref_binding_case_sensitive_failure(table_schema_simple: Schema) -> None: + ref = Reference("Foo") + with pytest.raises(ValueError): + ref.bind(table_schema_simple, case_sensitive=True) + + +def test_ref_binding_case_insensitive(table_schema_simple: 
Schema) -> None: + ref = Reference("Foo") + bound = BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)) + assert ref.bind(table_schema_simple, case_sensitive=False) == bound + + +def test_ref_binding_case_insensitive_failure(table_schema_simple: Schema) -> None: + ref = Reference("Foot") + with pytest.raises(ValueError): + ref.bind(table_schema_simple, case_sensitive=False) + + +def test_in_to_eq() -> None: + assert In("x", (34.56,)) == EqualTo("x", 34.56) + + +def test_empty_bind_in(table_schema_simple: Schema) -> None: + bound = BoundIn(BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), set()) + assert bound == AlwaysFalse() + + +def test_empty_bind_not_in(table_schema_simple: Schema) -> None: + bound = BoundNotIn(BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), set()) + assert bound == AlwaysTrue() + + +def test_bind_not_in_equal_term(table_schema_simple: Schema) -> None: + bound = BoundNotIn( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), {literal("hello")} + ) + assert ( + BoundNotEqualTo( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("hello"), + ) + == bound + ) + + +def test_in_empty() -> None: + assert In(Reference("foo"), ()) == AlwaysFalse() + + +def test_in_set() -> None: + assert In(Reference("foo"), {"a", "bc", "def"}).literals == {literal("a"), literal("bc"), literal("def")} + + +def test_in_tuple() -> None: + assert In(Reference("foo"), ("a", "bc", "def")).literals == {literal("a"), literal("bc"), literal("def")} + + +def test_in_list() -> None: + assert In(Reference("foo"), ["a", "bc", "def"]).literals == {literal("a"), literal("bc"), literal("def")} + + +def test_not_in_empty() -> None: + assert NotIn(Reference("foo"), ()) == AlwaysTrue() + + +def test_not_in_equal() -> None: + assert NotIn(Reference("foo"), ("hello",)) == NotEqualTo(term=Reference(name="foo"), literal="hello") + + +def test_bind_in(table_schema_simple: Schema) -> None: + bound = BoundIn( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), + {literal("hello"), literal("world")}, + ) + assert In(Reference("foo"), ("hello", "world")).bind(table_schema_simple) == bound + + +def test_bind_in_invert(table_schema_simple: Schema) -> None: + bound = BoundIn( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), + {literal("hello"), literal("world")}, + ) + assert ~bound == BoundNotIn( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), + {literal("hello"), literal("world")}, + ) + + +def test_bind_not_in_invert(table_schema_simple: Schema) -> None: + bound = BoundNotIn( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), + {literal("hello"), literal("world")}, + ) + assert ~bound == BoundIn( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), + {literal("hello"), literal("world")}, + ) + + +def test_bind_dedup(table_schema_simple: Schema) -> None: + bound = BoundIn( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), + {literal("hello"), literal("world")}, + ) + assert In(Reference("foo"), ("hello", "world", "world")).bind(table_schema_simple) == bound + 
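+# Binding dedups the literal set of an In predicate; when only one distinct
+# literal remains, the bound predicate degenerates to an equality, as the
+# next test shows.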
+ +def test_bind_dedup_to_eq(table_schema_simple: Schema) -> None: + bound = BoundEqualTo( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), literal("hello") + ) + assert In(Reference("foo"), ("hello", "hello")).bind(table_schema_simple) == bound + + +def test_bound_equal_to_invert(table_schema_simple: Schema) -> None: + bound = BoundEqualTo( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), literal("hello") + ) + assert ~bound == BoundNotEqualTo( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("hello"), + ) + + +def test_bound_not_equal_to_invert(table_schema_simple: Schema) -> None: + bound = BoundNotEqualTo( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), literal("hello") + ) + assert ~bound == BoundEqualTo( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("hello"), + ) + + +def test_bound_greater_than_or_equal_invert(table_schema_simple: Schema) -> None: + bound = BoundGreaterThanOrEqual( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), literal("hello") + ) + assert ~bound == BoundLessThan( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("hello"), + ) + + +def test_bound_greater_than_invert(table_schema_simple: Schema) -> None: + bound = BoundGreaterThan( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), literal("hello") + ) + assert ~bound == BoundLessThanOrEqual( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("hello"), + ) + + +def test_bound_less_than_invert(table_schema_simple: Schema) -> None: + bound = BoundLessThan( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), literal("hello") + ) + assert ~bound == BoundGreaterThanOrEqual( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("hello"), + ) + + +def test_bound_less_than_or_equal_invert(table_schema_simple: Schema) -> None: + bound = BoundLessThanOrEqual( + BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), literal("hello") + ) + assert ~bound == BoundGreaterThan( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("hello"), + ) + + +def test_not_equal_to_invert() -> None: + bound = NotEqualTo( + term=BoundReference( # type: ignore + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal="hello", + ) + assert ~bound == EqualTo( + term=BoundReference( # type: ignore + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal="hello", + ) + + +def test_greater_than_or_equal_invert() -> 
None: + bound = GreaterThanOrEqual( + term=BoundReference( # type: ignore + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal="hello", + ) + assert ~bound == LessThan( + term=BoundReference( # type: ignore + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal="hello", + ) + + +def test_less_than_or_equal_invert() -> None: + bound = LessThanOrEqual( + term=BoundReference( # type: ignore + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal="hello", + ) + assert ~bound == GreaterThan( + term=BoundReference( # type: ignore + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal="hello", + ) + + +@pytest.mark.parametrize( + "pred", + [ + NotIn(Reference("foo"), ("hello", "world")), + NotEqualTo(Reference("foo"), "hello"), + EqualTo(Reference("foo"), "hello"), + GreaterThan(Reference("foo"), "hello"), + LessThan(Reference("foo"), "hello"), + GreaterThanOrEqual(Reference("foo"), "hello"), + LessThanOrEqual(Reference("foo"), "hello"), + ], +) +def test_bind(pred: UnboundPredicate[Any], table_schema_simple: Schema) -> None: + assert pred.bind(table_schema_simple, case_sensitive=True).term.field == table_schema_simple.find_field( # type: ignore + pred.term.name, case_sensitive=True # type: ignore + ) + + +@pytest.mark.parametrize( + "pred", + [ + In(Reference("Bar"), (5, 2)), + NotIn(Reference("Bar"), (5, 2)), + NotEqualTo(Reference("Bar"), 5), + EqualTo(Reference("Bar"), 5), + GreaterThan(Reference("Bar"), 5), + LessThan(Reference("Bar"), 5), + GreaterThanOrEqual(Reference("Bar"), 5), + LessThanOrEqual(Reference("Bar"), 5), + ], +) +def test_bind_case_insensitive(pred: UnboundPredicate[Any], table_schema_simple: Schema) -> None: + assert pred.bind(table_schema_simple, case_sensitive=False).term.field == table_schema_simple.find_field( # type: ignore + pred.term.name, case_sensitive=False # type: ignore + ) + + +@pytest.mark.parametrize( + "exp, testexpra, testexprb", + [ + ( + And(ExpressionA(), ExpressionB()), + And(ExpressionA(), ExpressionB()), + Or(ExpressionA(), ExpressionB()), + ), + ( + Or(ExpressionA(), ExpressionB()), + Or(ExpressionA(), ExpressionB()), + And(ExpressionA(), ExpressionB()), + ), + (Not(ExpressionA()), Not(ExpressionA()), ExpressionB()), + (ExpressionA(), ExpressionA(), ExpressionB()), + (ExpressionB(), ExpressionB(), ExpressionA()), + ( + In(Reference("foo"), ("hello", "world")), + In(Reference("foo"), ("hello", "world")), + In(Reference("not_foo"), ("hello", "world")), + ), + ( + In(Reference("foo"), ("hello", "world")), + In(Reference("foo"), ("hello", "world")), + In(Reference("foo"), ("goodbye", "world")), + ), + ], +) +def test_eq(exp: BooleanExpression, testexpra: BooleanExpression, testexprb: BooleanExpression) -> None: + assert exp == testexpra and exp != testexprb + + +@pytest.mark.parametrize( + "lhs, rhs", + [ + ( + And(ExpressionA(), ExpressionB()), + Or(ExpressionB(), ExpressionA()), + ), + ( + Or(ExpressionA(), ExpressionB()), + And(ExpressionB(), ExpressionA()), + ), + ( + Not(ExpressionA()), + ExpressionA(), + ), + ( + In(Reference("foo"), ("hello", "world")), + NotIn(Reference("foo"), ("hello", "world")), + ), + ( + NotIn(Reference("foo"), ("hello", "world")), + In(Reference("foo"), ("hello", 
"world")), + ), + (GreaterThan(Reference("foo"), 5), LessThanOrEqual(Reference("foo"), 5)), + (LessThan(Reference("foo"), 5), GreaterThanOrEqual(Reference("foo"), 5)), + (EqualTo(Reference("foo"), 5), NotEqualTo(Reference("foo"), 5)), + ( + ExpressionA(), + ExpressionB(), + ), + ], +) +def test_negate(lhs: BooleanExpression, rhs: BooleanExpression) -> None: + assert ~lhs == rhs + + +@pytest.mark.parametrize( + "lhs, rhs", + [ + ( + And(ExpressionA(), ExpressionB(), ExpressionA()), + And(And(ExpressionA(), ExpressionB()), ExpressionA()), + ), + ( + Or(ExpressionA(), ExpressionB(), ExpressionA()), + Or(Or(ExpressionA(), ExpressionB()), ExpressionA()), + ), + (Not(Not(ExpressionA())), ExpressionA()), + ], +) +def test_reduce(lhs: BooleanExpression, rhs: BooleanExpression) -> None: + assert lhs == rhs + + +@pytest.mark.parametrize( + "lhs, rhs", + [ + (And(AlwaysTrue(), ExpressionB()), ExpressionB()), + (And(AlwaysFalse(), ExpressionB()), AlwaysFalse()), + (And(ExpressionB(), AlwaysTrue()), ExpressionB()), + (Or(AlwaysTrue(), ExpressionB()), AlwaysTrue()), + (Or(AlwaysFalse(), ExpressionB()), ExpressionB()), + (Or(ExpressionA(), AlwaysFalse()), ExpressionA()), + (Not(Not(ExpressionA())), ExpressionA()), + (Not(AlwaysTrue()), AlwaysFalse()), + (Not(AlwaysFalse()), AlwaysTrue()), + ], +) +def test_base_AlwaysTrue_base_AlwaysFalse(lhs: BooleanExpression, rhs: BooleanExpression) -> None: + assert lhs == rhs + + +def test_invert_always() -> None: + assert ~AlwaysFalse() == AlwaysTrue() + assert ~AlwaysTrue() == AlwaysFalse() + + +def test_accessor_base_class() -> None: + """Test retrieving a value at a position of a container using an accessor""" + + struct = Record( + struct=StructType( + NestedField(1, "a", StringType()), + NestedField(2, "b", StringType()), + NestedField(3, "c", StringType()), + NestedField(4, "d", IntegerType()), + NestedField(5, "e", IntegerType()), + NestedField(6, "f", IntegerType()), + NestedField(7, "g", FloatType()), + NestedField(8, "h", DecimalType(8, 4)), + NestedField(9, "i", UUIDType()), + NestedField(10, "j", BooleanType()), + NestedField(11, "k", BooleanType()), + NestedField(12, "l", BinaryType()), + ) + ) + + uuid_value = uuid.uuid4() + + struct[0] = "foo" + struct[1] = "bar" + struct[2] = "baz" + struct[3] = 1 + struct[4] = 2 + struct[5] = 3 + struct[6] = 1.234 + struct[7] = Decimal("1.234") + struct[8] = uuid_value + struct[9] = True + struct[10] = False + struct[11] = b"\x19\x04\x9e?" + + assert Accessor(position=0).get(struct) == "foo" + assert Accessor(position=1).get(struct) == "bar" + assert Accessor(position=2).get(struct) == "baz" + assert Accessor(position=3).get(struct) == 1 + assert Accessor(position=4).get(struct) == 2 + assert Accessor(position=5).get(struct) == 3 + assert Accessor(position=6).get(struct) == 1.234 + assert Accessor(position=7).get(struct) == Decimal("1.234") + assert Accessor(position=8).get(struct) == uuid_value + assert Accessor(position=9).get(struct) is True + assert Accessor(position=10).get(struct) is False + assert Accessor(position=11).get(struct) == b"\x19\x04\x9e?" 
+ + +@pytest.fixture +def field() -> NestedField: + return NestedField(field_id=1, name="foo", field_type=StringType(), required=False) + + +@pytest.fixture +def accessor() -> Accessor: + return Accessor(position=1) + + +@pytest.fixture +def term(field: NestedField, accessor: Accessor) -> BoundReference[Any]: + return BoundReference( + field=field, + accessor=accessor, + ) + + +def test_bound_reference(field: NestedField, accessor: Accessor) -> None: + bound_ref = BoundReference(field=field, accessor=accessor) + assert str(bound_ref) == f"BoundReference(field={repr(field)}, accessor={repr(accessor)})" + assert repr(bound_ref) == f"BoundReference(field={repr(field)}, accessor={repr(accessor)})" + assert bound_ref == eval(repr(bound_ref)) + assert bound_ref == pickle.loads(pickle.dumps(bound_ref)) + + +def test_reference() -> None: + abc = "abc" + ref = Reference(abc) + assert str(ref) == "Reference(name='abc')" + assert repr(ref) == "Reference(name='abc')" + assert ref == eval(repr(ref)) + assert ref == pickle.loads(pickle.dumps(ref)) + + +def test_and() -> None: + null = IsNull(Reference("a")) + nan = IsNaN(Reference("b")) + and_ = And(null, nan) + assert str(and_) == f"And(left={str(null)}, right={str(nan)})" + assert repr(and_) == f"And(left={repr(null)}, right={repr(nan)})" + assert and_ == eval(repr(and_)) + assert and_ == pickle.loads(pickle.dumps(and_)) + + +def test_or() -> None: + null = IsNull(Reference("a")) + nan = IsNaN(Reference("b")) + or_ = Or(null, nan) + assert str(or_) == f"Or(left={str(null)}, right={str(nan)})" + assert repr(or_) == f"Or(left={repr(null)}, right={repr(nan)})" + assert or_ == eval(repr(or_)) + assert or_ == pickle.loads(pickle.dumps(or_)) + + +def test_not() -> None: + null = IsNull(Reference("a")) + not_ = Not(null) + assert str(not_) == f"Not(child={str(null)})" + assert repr(not_) == f"Not(child={repr(null)})" + assert not_ == eval(repr(not_)) + assert not_ == pickle.loads(pickle.dumps(not_)) + + +def test_always_true() -> None: + always_true = AlwaysTrue() + assert str(always_true) == "AlwaysTrue()" + assert repr(always_true) == "AlwaysTrue()" + assert always_true == eval(repr(always_true)) + assert always_true == pickle.loads(pickle.dumps(always_true)) + + +def test_always_false() -> None: + always_false = AlwaysFalse() + assert str(always_false) == "AlwaysFalse()" + assert repr(always_false) == "AlwaysFalse()" + assert always_false == eval(repr(always_false)) + assert always_false == pickle.loads(pickle.dumps(always_false)) + + +def test_bound_reference_field_property() -> None: + field = NestedField(field_id=1, name="foo", field_type=StringType(), required=False) + position1_accessor = Accessor(position=1) + bound_ref = BoundReference(field=field, accessor=position1_accessor) + assert bound_ref.field == NestedField(field_id=1, name="foo", field_type=StringType(), required=False) + + +def test_bound_is_null(term: BoundReference[Any]) -> None: + bound_is_null = BoundIsNull(term) + assert str(bound_is_null) == f"BoundIsNull(term={str(term)})" + assert repr(bound_is_null) == f"BoundIsNull(term={repr(term)})" + assert bound_is_null == eval(repr(bound_is_null)) + + +def test_bound_is_not_null(term: BoundReference[Any]) -> None: + bound_not_null = BoundNotNull(term) + assert str(bound_not_null) == f"BoundNotNull(term={str(term)})" + assert repr(bound_not_null) == f"BoundNotNull(term={repr(term)})" + assert bound_not_null == eval(repr(bound_not_null)) + + +def test_is_null() -> None: + ref = Reference("a") + is_null = IsNull(ref) + assert str(is_null) == 
f"IsNull(term={str(ref)})" + assert repr(is_null) == f"IsNull(term={repr(ref)})" + assert is_null == eval(repr(is_null)) + assert is_null == pickle.loads(pickle.dumps(is_null)) + + +def test_not_null() -> None: + ref = Reference("a") + non_null = NotNull(ref) + assert str(non_null) == f"NotNull(term={str(ref)})" + assert repr(non_null) == f"NotNull(term={repr(ref)})" + assert non_null == eval(repr(non_null)) + assert non_null == pickle.loads(pickle.dumps(non_null)) + + +def test_bound_is_nan(accessor: Accessor) -> None: + # We need a FloatType here + term = BoundReference[float]( + field=NestedField(field_id=1, name="foo", field_type=FloatType(), required=False), + accessor=accessor, + ) + bound_is_nan = BoundIsNaN(term) + assert str(bound_is_nan) == f"BoundIsNaN(term={str(term)})" + assert repr(bound_is_nan) == f"BoundIsNaN(term={repr(term)})" + assert bound_is_nan == eval(repr(bound_is_nan)) + assert bound_is_nan == pickle.loads(pickle.dumps(bound_is_nan)) + + +def test_bound_is_not_nan(accessor: Accessor) -> None: + # We need a FloatType here + term = BoundReference[float]( + field=NestedField(field_id=1, name="foo", field_type=FloatType(), required=False), + accessor=accessor, + ) + bound_not_nan = BoundNotNaN(term) + assert str(bound_not_nan) == f"BoundNotNaN(term={str(term)})" + assert repr(bound_not_nan) == f"BoundNotNaN(term={repr(term)})" + assert bound_not_nan == eval(repr(bound_not_nan)) + assert bound_not_nan == pickle.loads(pickle.dumps(bound_not_nan)) + + +def test_is_nan() -> None: + ref = Reference("a") + is_nan = IsNaN(ref) + assert str(is_nan) == f"IsNaN(term={str(ref)})" + assert repr(is_nan) == f"IsNaN(term={repr(ref)})" + assert is_nan == eval(repr(is_nan)) + assert is_nan == pickle.loads(pickle.dumps(is_nan)) + + +def test_not_nan() -> None: + ref = Reference("a") + not_nan = NotNaN(ref) + assert str(not_nan) == f"NotNaN(term={str(ref)})" + assert repr(not_nan) == f"NotNaN(term={repr(ref)})" + assert not_nan == eval(repr(not_nan)) + assert not_nan == pickle.loads(pickle.dumps(not_nan)) + + +def test_bound_in(term: BoundReference[Any]) -> None: + bound_in = BoundIn(term, {literal("a"), literal("b"), literal("c")}) + assert str(bound_in) == f"BoundIn({str(term)}, {{a, b, c}})" + assert repr(bound_in) == f"BoundIn({repr(term)}, {{literal('a'), literal('b'), literal('c')}})" + assert bound_in == eval(repr(bound_in)) + assert bound_in == pickle.loads(pickle.dumps(bound_in)) + + +def test_bound_not_in(term: BoundReference[Any]) -> None: + bound_not_in = BoundNotIn(term, {literal("a"), literal("b"), literal("c")}) + assert str(bound_not_in) == f"BoundNotIn({str(term)}, {{a, b, c}})" + assert repr(bound_not_in) == f"BoundNotIn({repr(term)}, {{literal('a'), literal('b'), literal('c')}})" + assert bound_not_in == eval(repr(bound_not_in)) + assert bound_not_in == pickle.loads(pickle.dumps(bound_not_in)) + + +def test_in() -> None: + ref = Reference("a") + unbound_in = In(ref, {"a", "b", "c"}) + assert str(unbound_in) == f"In({str(ref)}, {{a, b, c}})" + assert repr(unbound_in) == f"In({repr(ref)}, {{literal('a'), literal('b'), literal('c')}})" + assert unbound_in == eval(repr(unbound_in)) + assert unbound_in == pickle.loads(pickle.dumps(unbound_in)) + + +def test_not_in() -> None: + ref = Reference("a") + not_in = NotIn(ref, {"a", "b", "c"}) + assert str(not_in) == f"NotIn({str(ref)}, {{a, b, c}})" + assert repr(not_in) == f"NotIn({repr(ref)}, {{literal('a'), literal('b'), literal('c')}})" + assert not_in == eval(repr(not_in)) + assert not_in == pickle.loads(pickle.dumps(not_in)) 
+ + +def test_bound_equal_to(term: BoundReference[Any]) -> None: + bound_equal_to = BoundEqualTo(term, literal("a")) + assert str(bound_equal_to) == f"BoundEqualTo(term={str(term)}, literal=literal('a'))" + assert repr(bound_equal_to) == f"BoundEqualTo(term={repr(term)}, literal=literal('a'))" + assert bound_equal_to == eval(repr(bound_equal_to)) + assert bound_equal_to == pickle.loads(pickle.dumps(bound_equal_to)) + + +def test_bound_not_equal_to(term: BoundReference[Any]) -> None: + bound_not_equal_to = BoundNotEqualTo(term, literal("a")) + assert str(bound_not_equal_to) == f"BoundNotEqualTo(term={str(term)}, literal=literal('a'))" + assert repr(bound_not_equal_to) == f"BoundNotEqualTo(term={repr(term)}, literal=literal('a'))" + assert bound_not_equal_to == eval(repr(bound_not_equal_to)) + assert bound_not_equal_to == pickle.loads(pickle.dumps(bound_not_equal_to)) + + +def test_bound_greater_than_or_equal_to(term: BoundReference[Any]) -> None: + bound_greater_than_or_equal_to = BoundGreaterThanOrEqual(term, literal("a")) + assert str(bound_greater_than_or_equal_to) == f"BoundGreaterThanOrEqual(term={str(term)}, literal=literal('a'))" + assert repr(bound_greater_than_or_equal_to) == f"BoundGreaterThanOrEqual(term={repr(term)}, literal=literal('a'))" + assert bound_greater_than_or_equal_to == eval(repr(bound_greater_than_or_equal_to)) + assert bound_greater_than_or_equal_to == pickle.loads(pickle.dumps(bound_greater_than_or_equal_to)) + + +def test_bound_greater_than(term: BoundReference[Any]) -> None: + bound_greater_than = BoundGreaterThan(term, literal("a")) + assert str(bound_greater_than) == f"BoundGreaterThan(term={str(term)}, literal=literal('a'))" + assert repr(bound_greater_than) == f"BoundGreaterThan(term={repr(term)}, literal=literal('a'))" + assert bound_greater_than == eval(repr(bound_greater_than)) + assert bound_greater_than == pickle.loads(pickle.dumps(bound_greater_than)) + + +def test_bound_less_than(term: BoundReference[Any]) -> None: + bound_less_than = BoundLessThan(term, literal("a")) + assert str(bound_less_than) == f"BoundLessThan(term={str(term)}, literal=literal('a'))" + assert repr(bound_less_than) == f"BoundLessThan(term={repr(term)}, literal=literal('a'))" + assert bound_less_than == eval(repr(bound_less_than)) + assert bound_less_than == pickle.loads(pickle.dumps(bound_less_than)) + + +def test_bound_less_than_or_equal(term: BoundReference[Any]) -> None: + bound_less_than_or_equal = BoundLessThanOrEqual(term, literal("a")) + assert str(bound_less_than_or_equal) == f"BoundLessThanOrEqual(term={str(term)}, literal=literal('a'))" + assert repr(bound_less_than_or_equal) == f"BoundLessThanOrEqual(term={repr(term)}, literal=literal('a'))" + assert bound_less_than_or_equal == eval(repr(bound_less_than_or_equal)) + assert bound_less_than_or_equal == pickle.loads(pickle.dumps(bound_less_than_or_equal)) + + +def test_equal_to() -> None: + equal_to = EqualTo(Reference("a"), literal("a")) + assert str(equal_to) == "EqualTo(term=Reference(name='a'), literal=literal('a'))" + assert repr(equal_to) == "EqualTo(term=Reference(name='a'), literal=literal('a'))" + assert equal_to == eval(repr(equal_to)) + assert equal_to == pickle.loads(pickle.dumps(equal_to)) + + +def test_not_equal_to() -> None: + not_equal_to = NotEqualTo(Reference("a"), literal("a")) + assert str(not_equal_to) == "NotEqualTo(term=Reference(name='a'), literal=literal('a'))" + assert repr(not_equal_to) == "NotEqualTo(term=Reference(name='a'), literal=literal('a'))" + assert not_equal_to == 
eval(repr(not_equal_to))
+    assert not_equal_to == pickle.loads(pickle.dumps(not_equal_to))
+
+
+def test_greater_than_or_equal_to() -> None:
+    greater_than_or_equal_to = GreaterThanOrEqual(Reference("a"), literal("a"))
+    assert str(greater_than_or_equal_to) == "GreaterThanOrEqual(term=Reference(name='a'), literal=literal('a'))"
+    assert repr(greater_than_or_equal_to) == "GreaterThanOrEqual(term=Reference(name='a'), literal=literal('a'))"
+    assert greater_than_or_equal_to == eval(repr(greater_than_or_equal_to))
+    assert greater_than_or_equal_to == pickle.loads(pickle.dumps(greater_than_or_equal_to))
+
+
+def test_greater_than() -> None:
+    greater_than = GreaterThan(Reference("a"), literal("a"))
+    assert str(greater_than) == "GreaterThan(term=Reference(name='a'), literal=literal('a'))"
+    assert repr(greater_than) == "GreaterThan(term=Reference(name='a'), literal=literal('a'))"
+    assert greater_than == eval(repr(greater_than))
+    assert greater_than == pickle.loads(pickle.dumps(greater_than))
+
+
+def test_less_than() -> None:
+    less_than = LessThan(Reference("a"), literal("a"))
+    assert str(less_than) == "LessThan(term=Reference(name='a'), literal=literal('a'))"
+    assert repr(less_than) == "LessThan(term=Reference(name='a'), literal=literal('a'))"
+    assert less_than == eval(repr(less_than))
+    assert less_than == pickle.loads(pickle.dumps(less_than))
+
+
+def test_less_than_or_equal() -> None:
+    less_than_or_equal = LessThanOrEqual(Reference("a"), literal("a"))
+    assert str(less_than_or_equal) == "LessThanOrEqual(term=Reference(name='a'), literal=literal('a'))"
+    assert repr(less_than_or_equal) == "LessThanOrEqual(term=Reference(name='a'), literal=literal('a'))"
+    assert less_than_or_equal == eval(repr(less_than_or_equal))
+    assert less_than_or_equal == pickle.loads(pickle.dumps(less_than_or_equal))
+
+
+def test_bound_reference_eval(table_schema_simple: Schema) -> None:
+    """Test creating a BoundReference and evaluating it on a StructProtocol"""
+    struct = Record(struct=table_schema_simple.as_struct())
+
+    struct[0] = "foovalue"
+    struct[1] = 123
+    struct[2] = True
+
+    position1_accessor = Accessor(position=0)
+    position2_accessor = Accessor(position=1)
+    position3_accessor = Accessor(position=2)
+
+    field1 = table_schema_simple.find_field(1)
+    field2 = table_schema_simple.find_field(2)
+    field3 = table_schema_simple.find_field(3)
+
+    bound_ref1 = BoundReference(field=field1, accessor=position1_accessor)
+    bound_ref2 = BoundReference(field=field2, accessor=position2_accessor)
+    bound_ref3 = BoundReference(field=field3, accessor=position3_accessor)
+
+    assert bound_ref1.eval(struct) == "foovalue"
+    assert bound_ref2.eval(struct) == 123
+    assert bound_ref3.eval(struct) is True
+
+
+def test_non_primitive_from_byte_buffer() -> None:
+    with pytest.raises(ValueError) as exc_info:
+        _ = _from_byte_buffer(ListType(element_id=1, element_type=StringType()), b"\0x00")
+
+    # The error message embeds the Python type of the non-primitive argument
+    assert str(exc_info.value) == "Expected a PrimitiveType, got: <class 'pyiceberg.types.ListType'>"
+
+
+def test_string_argument_unbound_unary() -> None:
+    assert IsNull("a") == IsNull(Reference("a"))
+
+
+def test_string_argument_unbound_literal() -> None:
+    assert EqualTo("a", "b") == EqualTo(Reference("a"), "b")
+
+
+def test_string_argument_unbound_set() -> None:
+    assert In("a", {"b", "c"}) == In(Reference("a"), {"b", "c"})
+
+
+@pytest.fixture
+def int_schema() -> Schema:
+    return Schema(NestedField(field_id=1, name="a", field_type=IntegerType(), required=False))
+
+
+@pytest.fixture
+def above_int_max() -> Literal[int]:
+    return literal(IntegerType.max + 1)
+
+
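+# The above_int_max/below_int_min fixtures (and their float/long analogues
+# further down) produce literals just outside the type's value range.
+# Binding such a predicate can be decided statically, which is why the
+# tests below assert the AlwaysTrue()/AlwaysFalse() singletons (note the
+# `is` comparison) rather than a bound predicate.
+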
+@pytest.fixture +def below_int_min() -> Literal[int]: + return literal(IntegerType.min - 1) + + +def test_above_int_bounds_equal_to(int_schema: Schema, above_int_max: Literal[int], below_int_min: Literal[int]) -> None: + assert EqualTo[int]("a", above_int_max).bind(int_schema) is AlwaysFalse() + assert EqualTo[int]("a", below_int_min).bind(int_schema) is AlwaysFalse() + + +def test_above_int_bounds_not_equal_to(int_schema: Schema, above_int_max: Literal[int], below_int_min: Literal[int]) -> None: + assert NotEqualTo[int]("a", above_int_max).bind(int_schema) is AlwaysTrue() + assert NotEqualTo[int]("a", below_int_min).bind(int_schema) is AlwaysTrue() + + +def test_above_int_bounds_less_than(int_schema: Schema, above_int_max: Literal[int], below_int_min: Literal[int]) -> None: + assert LessThan[int]("a", above_int_max).bind(int_schema) is AlwaysTrue() + assert LessThan[int]("a", below_int_min).bind(int_schema) is AlwaysFalse() + + +def test_above_int_bounds_less_than_or_equal( + int_schema: Schema, above_int_max: Literal[int], below_int_min: Literal[int] +) -> None: + assert LessThanOrEqual[int]("a", above_int_max).bind(int_schema) is AlwaysTrue() + assert LessThanOrEqual[int]("a", below_int_min).bind(int_schema) is AlwaysFalse() + + +def test_above_int_bounds_greater_than(int_schema: Schema, above_int_max: Literal[int], below_int_min: Literal[int]) -> None: + assert GreaterThan[int]("a", above_int_max).bind(int_schema) is AlwaysFalse() + assert GreaterThan[int]("a", below_int_min).bind(int_schema) is AlwaysTrue() + + +def test_above_int_bounds_greater_than_or_equal( + int_schema: Schema, above_int_max: Literal[int], below_int_min: Literal[int] +) -> None: + assert GreaterThanOrEqual[int]("a", above_int_max).bind(int_schema) is AlwaysFalse() + assert GreaterThanOrEqual[int]("a", below_int_min).bind(int_schema) is AlwaysTrue() + + +@pytest.fixture +def float_schema() -> Schema: + return Schema(NestedField(field_id=1, name="a", field_type=FloatType(), required=False)) + + +@pytest.fixture +def above_float_max() -> Literal[float]: + return literal(FloatType.max * 2) + + +@pytest.fixture +def below_float_min() -> Literal[float]: + return literal(FloatType.min * 2) + + +def test_above_float_bounds_equal_to( + float_schema: Schema, above_float_max: Literal[float], below_float_min: Literal[float] +) -> None: + assert EqualTo[float]("a", above_float_max).bind(float_schema) is AlwaysFalse() + assert EqualTo[float]("a", below_float_min).bind(float_schema) is AlwaysFalse() + + +def test_above_float_bounds_not_equal_to( + float_schema: Schema, above_float_max: Literal[float], below_float_min: Literal[float] +) -> None: + assert NotEqualTo[float]("a", above_float_max).bind(float_schema) is AlwaysTrue() + assert NotEqualTo[float]("a", below_float_min).bind(float_schema) is AlwaysTrue() + + +def test_above_float_bounds_less_than( + float_schema: Schema, above_float_max: Literal[float], below_float_min: Literal[float] +) -> None: + assert LessThan[float]("a", above_float_max).bind(float_schema) is AlwaysTrue() + assert LessThan[float]("a", below_float_min).bind(float_schema) is AlwaysFalse() + + +def test_above_float_bounds_less_than_or_equal( + float_schema: Schema, above_float_max: Literal[float], below_float_min: Literal[float] +) -> None: + assert LessThanOrEqual[float]("a", above_float_max).bind(float_schema) is AlwaysTrue() + assert LessThanOrEqual[float]("a", below_float_min).bind(float_schema) is AlwaysFalse() + + +def test_above_float_bounds_greater_than( + float_schema: Schema, above_float_max: 
Literal[float], below_float_min: Literal[float]
+) -> None:
+    assert GreaterThan[float]("a", above_float_max).bind(float_schema) is AlwaysFalse()
+    assert GreaterThan[float]("a", below_float_min).bind(float_schema) is AlwaysTrue()
+
+
+def test_above_float_bounds_greater_than_or_equal(
+    float_schema: Schema, above_float_max: Literal[float], below_float_min: Literal[float]
+) -> None:
+    assert GreaterThanOrEqual[float]("a", above_float_max).bind(float_schema) is AlwaysFalse()
+    assert GreaterThanOrEqual[float]("a", below_float_min).bind(float_schema) is AlwaysTrue()
+
+
+@pytest.fixture
+def long_schema() -> Schema:
+    return Schema(NestedField(field_id=1, name="a", field_type=LongType(), required=False))
+
+
+@pytest.fixture
+def above_long_max() -> Literal[int]:
+    return literal(LongType.max + 1)
+
+
+@pytest.fixture
+def below_long_min() -> Literal[int]:
+    return literal(LongType.min - 1)
+
+
+def test_above_long_bounds_equal_to(long_schema: Schema, above_long_max: Literal[int], below_long_min: Literal[int]) -> None:
+    assert EqualTo[int]("a", above_long_max).bind(long_schema) is AlwaysFalse()
+    assert EqualTo[int]("a", below_long_min).bind(long_schema) is AlwaysFalse()
+
+
+def test_above_long_bounds_not_equal_to(long_schema: Schema, above_long_max: Literal[int], below_long_min: Literal[int]) -> None:
+    assert NotEqualTo[int]("a", above_long_max).bind(long_schema) is AlwaysTrue()
+    assert NotEqualTo[int]("a", below_long_min).bind(long_schema) is AlwaysTrue()
+
+
+def test_above_long_bounds_less_than(long_schema: Schema, above_long_max: Literal[int], below_long_min: Literal[int]) -> None:
+    assert LessThan[int]("a", above_long_max).bind(long_schema) is AlwaysTrue()
+    assert LessThan[int]("a", below_long_min).bind(long_schema) is AlwaysFalse()
+
+
+def test_above_long_bounds_less_than_or_equal(
+    long_schema: Schema, above_long_max: Literal[int], below_long_min: Literal[int]
+) -> None:
+    assert LessThanOrEqual[int]("a", above_long_max).bind(long_schema) is AlwaysTrue()
+    assert LessThanOrEqual[int]("a", below_long_min).bind(long_schema) is AlwaysFalse()
+
+
+def test_above_long_bounds_greater_than(long_schema: Schema, above_long_max: Literal[int], below_long_min: Literal[int]) -> None:
+    assert GreaterThan[int]("a", above_long_max).bind(long_schema) is AlwaysFalse()
+    assert GreaterThan[int]("a", below_long_min).bind(long_schema) is AlwaysTrue()
+
+
+def test_above_long_bounds_greater_than_or_equal(
+    long_schema: Schema, above_long_max: Literal[int], below_long_min: Literal[int]
+) -> None:
+    assert GreaterThanOrEqual[int]("a", above_long_max).bind(long_schema) is AlwaysFalse()
+    assert GreaterThanOrEqual[int]("a", below_long_min).bind(long_schema) is AlwaysTrue()
+
+
+# __  __      ___
+# |  \/  |_  _| _ \_  _
+# | |\/| | || |  _/ || |
+# |_|  |_|\_, |_|  \_, |
+#         |__/     |__/
+
+assert_type(EqualTo("a", "b"), EqualTo[str])
+assert_type(In("a", ("a", "b", "c")), In[str])
+assert_type(In("a", (1, 2, 3)), In[int])
+assert_type(NotIn("a", ("a", "b", "c")), NotIn[str])
diff --git a/tests/expressions/test_literals.py b/tests/expressions/test_literals.py
new file mode 100644
index 0000000000..309bd28c4c
--- /dev/null
+++ b/tests/expressions/test_literals.py
@@ -0,0 +1,897 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint:disable=eval-used + +import datetime +import uuid +from decimal import Decimal +from typing import ( + Any, + List, + Set, + Type, +) + +import pytest +from typing_extensions import assert_type + +from pyiceberg.expressions.literals import ( + BinaryLiteral, + BooleanLiteral, + DateLiteral, + DecimalLiteral, + DoubleLiteral, + FixedLiteral, + FloatAboveMax, + FloatBelowMin, + FloatLiteral, + IntAboveMax, + IntBelowMin, + Literal, + LongLiteral, + StringLiteral, + TimeLiteral, + TimestampLiteral, + literal, +) +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IntegerType, + LongType, + PrimitiveType, + StringType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) + + +def test_literal_from_none_error() -> None: + with pytest.raises(TypeError) as e: + literal(None) # type: ignore + assert "Invalid literal value: None" in str(e.value) + + +def test_literal_from_nan_error() -> None: + with pytest.raises(ValueError) as e: + literal(float("nan")) + assert "Cannot create expression literal from NaN." in str(e.value) + + +@pytest.mark.parametrize( + "literal_class", + [ + BooleanLiteral, + LongLiteral, + FloatLiteral, + DoubleLiteral, + DateLiteral, + TimeLiteral, + TimestampLiteral, + DecimalLiteral, + StringLiteral, + FixedLiteral, + BinaryLiteral, + ], +) +def test_literal_classes_with_none_type_error(literal_class: Type[PrimitiveType]) -> None: + with pytest.raises(TypeError) as e: + literal_class(None) + assert "Invalid literal value: None" in str(e.value) + + +@pytest.mark.parametrize("literal_class", [FloatLiteral, DoubleLiteral]) +def test_literal_classes_with_nan_value_error(literal_class: Type[PrimitiveType]) -> None: + with pytest.raises(ValueError) as e: + literal_class(float("nan")) + assert "Cannot create expression literal from NaN." 
in str(e.value) + + +# Numeric + + +def test_numeric_literal_comparison() -> None: + small_lit = literal(10).to(IntegerType()) + big_lit = literal(1000).to(IntegerType()) + assert small_lit != big_lit + assert small_lit == literal(10) + assert small_lit < big_lit + assert small_lit <= big_lit + assert big_lit > small_lit + assert big_lit >= small_lit + + +def test_integer_to_long_conversion() -> None: + lit = literal(34).to(IntegerType()) + long_lit = lit.to(LongType()) + + assert lit.value == long_lit.value + + +def test_integer_to_float_conversion() -> None: + lit = literal(34).to(IntegerType()) + float_lit = lit.to(FloatType()) + + assert lit.value == float_lit.value + + +def test_integer_to_double_conversion() -> None: + lit = literal(34).to(IntegerType()) + dbl_lit = lit.to(DoubleType()) + + assert lit.value == dbl_lit.value + + +@pytest.mark.parametrize( + "decimal_type, decimal_value", [(DecimalType(9, 0), "34"), (DecimalType(9, 2), "34.00"), (DecimalType(9, 4), "34.0000")] +) +def test_integer_to_decimal_conversion(decimal_type: DecimalType, decimal_value: str) -> None: + lit = literal(34).to(IntegerType()) + + assert lit.to(decimal_type).value.as_tuple() == Decimal(decimal_value).as_tuple() # type: ignore + + +def test_integer_to_date_conversion() -> None: + one_day = "2022-03-28" + date_delta = (datetime.date.fromisoformat(one_day) - datetime.date.fromisoformat("1970-01-01")).days + date_lit = literal(date_delta).to(DateType()) + + assert isinstance(date_lit, DateLiteral) + assert date_lit.value == date_delta + + +def test_long_to_integer_within_bound() -> None: + lit = literal(34).to(LongType()) + int_lit = lit.to(IntegerType()) + + assert lit.value == int_lit.value + + +def test_long_to_integer_outside_bound() -> None: + big_lit = literal(IntegerType.max + 1).to(LongType()) + above_max_lit = big_lit.to(IntegerType()) + assert above_max_lit == IntAboveMax() + + small_lit = literal(IntegerType.min - 1).to(LongType()) + below_min_lit = small_lit.to(IntegerType()) + assert below_min_lit == IntBelowMin() + + +def test_long_to_float_conversion() -> None: + lit = literal(34).to(LongType()) + float_lit = lit.to(FloatType()) + + assert lit.value == float_lit.value + + +def test_long_to_double_conversion() -> None: + lit = literal(34).to(LongType()) + dbl_lit = lit.to(DoubleType()) + + assert lit.value == dbl_lit.value + + +def test_long_to_time() -> None: + long_lit = literal(51661919000).to(LongType()) + time_lit = long_lit.to(TimeType()) + + assert isinstance(time_lit, TimeLiteral) + assert time_lit.value == long_lit.value + + +def test_long_to_timestamp() -> None: + long_lit = literal(1647305201).to(LongType()) + timestamp_lit = long_lit.to(TimestampType()) + + assert timestamp_lit.value == long_lit.value + + +@pytest.mark.parametrize( + "decimal_type, decimal_value", [(DecimalType(9, 0), "34"), (DecimalType(9, 2), "34.00"), (DecimalType(9, 4), "34.0000")] +) +def test_long_to_decimal_conversion(decimal_type: DecimalType, decimal_value: str) -> None: + lit = literal(34).to(LongType()) + + assert lit.to(decimal_type).value.as_tuple() == Decimal(decimal_value).as_tuple() # type: ignore + + +def test_float_to_double() -> None: + lit = literal(34.56).to(FloatType()) + dbl_lit = lit.to(DoubleType()) + + assert lit.value == dbl_lit.value + + +@pytest.mark.parametrize( + "decimal_type, decimal_value", [(DecimalType(9, 1), "34.6"), (DecimalType(9, 2), "34.56"), (DecimalType(9, 4), "34.5600")] +) +def test_float_to_decimal_conversion(decimal_type: DecimalType, decimal_value: str) -> None: + 
lit = literal(34.56).to(FloatType()) + + assert lit.to(decimal_type).value.as_tuple() == Decimal(decimal_value).as_tuple() # type: ignore + + +def test_double_to_float_within_bound() -> None: + lit = literal(34.56).to(DoubleType()) + float_lit = lit.to(FloatType()) + + assert lit.value == float_lit.value + + +def test_double_to_float_outside_bound() -> None: + big_lit = literal(FloatType.max + 1.0e37).to(DoubleType()) + above_max_lit = big_lit.to(FloatType()) + assert above_max_lit == FloatAboveMax() + + small_lit = literal(FloatType.min - 1.0e37).to(DoubleType()) + below_min_lit = small_lit.to(FloatType()) + assert below_min_lit == FloatBelowMin() + + +@pytest.mark.parametrize( + "decimal_type, decimal_value", [(DecimalType(9, 1), "34.6"), (DecimalType(9, 2), "34.56"), (DecimalType(9, 4), "34.5600")] +) +def test_double_to_decimal_conversion(decimal_type: DecimalType, decimal_value: str) -> None: + lit = literal(34.56).to(DoubleType()) + + assert lit.to(decimal_type).value.as_tuple() == Decimal(decimal_value).as_tuple() # type: ignore + + +def test_decimal_to_decimal_conversion() -> None: + lit = literal(Decimal("34.11").quantize(Decimal(".01"))) + + assert lit.value.as_tuple() == lit.to(DecimalType(9, 2)).value.as_tuple() + assert lit.value.as_tuple() == lit.to(DecimalType(11, 2)).value.as_tuple() + with pytest.raises(ValueError) as e: + _ = lit.to(DecimalType(9, 0)) + assert "Could not convert 34.11 into a decimal(9, 0)" in str(e.value) + with pytest.raises(ValueError) as e: + _ = lit.to(DecimalType(9, 1)) + assert "Could not convert 34.11 into a decimal(9, 1)" in str(e.value) + with pytest.raises(ValueError) as e: + _ = lit.to(DecimalType(9, 3)) + assert "Could not convert 34.11 into a decimal(9, 3)" in str(e.value) + + +def test_timestamp_to_date() -> None: + epoch_lit = TimestampLiteral(int(datetime.datetime.fromisoformat("1970-01-01T01:23:45.678").timestamp() * 1_000_000)) + date_lit = epoch_lit.to(DateType()) + + assert date_lit.value == 0 + + +def test_string_literal() -> None: + sqrt2 = literal("1.414").to(StringType()) + pi = literal("3.141").to(StringType()) + pi_string_lit = StringLiteral("3.141") + pi_double_lit = literal(3.141).to(DoubleType()) + + assert literal("3.141").to(IntegerType()) == literal(3) + assert literal("3.141").to(LongType()) == literal(3) + + assert sqrt2 != pi + assert pi != pi_double_lit + assert pi == pi_string_lit + assert pi == pi + assert sqrt2 < pi + assert sqrt2 <= pi + assert pi > sqrt2 + assert pi >= sqrt2 + assert str(pi) == "3.141" + + +def test_string_to_string_literal() -> None: + assert literal("abc") == literal("abc").to(StringType()) + + +def test_string_to_date_literal() -> None: + one_day = "2017-08-18" + date_lit = literal(one_day).to(DateType()) + + date_delta = (datetime.date.fromisoformat(one_day) - datetime.date.fromisoformat("1970-01-01")).days + assert date_delta == date_lit.value + + +def test_string_to_time_literal() -> None: + time_str = literal("14:21:01.919") + time_lit = time_str.to(TimeType()) + + avro_val = 51661919000 + + assert isinstance(time_lit, TimeLiteral) # type: ignore + assert avro_val == time_lit.value # type: ignore + + +def test_string_to_timestamp_literal() -> None: + timestamp_str = literal("2017-08-18T14:21:01.919234+00:00") + timestamp = timestamp_str.to(TimestamptzType()) + + avro_val = 1503066061919234 + assert avro_val == timestamp.value + + timestamp_str = literal("2017-08-18T14:21:01.919234") + timestamp = timestamp_str.to(TimestampType()) + assert avro_val == timestamp.value + + timestamp_str = 
literal("2017-08-18T14:21:01.919234-07:00") + timestamp = timestamp_str.to(TimestamptzType()) + avro_val = 1503091261919234 + assert avro_val == timestamp.value + + +def test_timestamp_with_zone_without_zone_in_literal() -> None: + timestamp_str = literal("2017-08-18T14:21:01.919234") + with pytest.raises(ValueError) as e: + _ = timestamp_str.to(timestamp_str.to(TimestamptzType())) + assert "Missing zone offset: 2017-08-18T14:21:01.919234 (must be ISO-8601)" in str(e.value) + + +def test_invalid_timestamp_in_literal() -> None: + timestamp_str = literal("abc") + with pytest.raises(ValueError) as e: + _ = timestamp_str.to(timestamp_str.to(TimestamptzType())) + assert "Invalid timestamp with zone: abc (must be ISO-8601)" in str(e.value) + + +def test_timestamp_without_zone_with_zone_in_literal() -> None: + timestamp_str = literal("2017-08-18T14:21:01.919234+07:00") + with pytest.raises(ValueError) as e: + _ = timestamp_str.to(TimestampType()) + assert "Zone offset provided, but not expected: 2017-08-18T14:21:01.919234+07:00" in str(e.value) + + +def test_invalid_timestamp_with_zone_in_literal() -> None: + timestamp_str = literal("abc") + with pytest.raises(ValueError) as e: + _ = timestamp_str.to(TimestampType()) + assert "Invalid timestamp without zone: abc (must be ISO-8601)" in str(e.value) + + +def test_string_to_uuid_literal() -> None: + expected = uuid.uuid4() + uuid_str = literal(str(expected)) + uuid_lit = uuid_str.to(UUIDType()) + + assert expected.bytes == uuid_lit.value + + +def test_string_to_decimal_literal() -> None: + decimal_str = literal("34.560") + decimal_lit = decimal_str.to(DecimalType(9, 3)) + + assert 3 == abs(decimal_lit.value.as_tuple().exponent) # type: ignore + assert Decimal("34.560").as_tuple() == decimal_lit.value.as_tuple() # type: ignore + + +def test_string_to_boolean_literal() -> None: + assert literal(True) == literal("true").to(BooleanType()) + assert literal(True) == literal("True").to(BooleanType()) + assert literal(False) == literal("false").to(BooleanType()) + assert literal(False) == literal("False").to(BooleanType()) + + +def test_invalid_string_to_boolean_literal() -> None: + invalid_boolean_str = literal("unknown") + with pytest.raises(ValueError) as e: + _ = invalid_boolean_str.to(BooleanType()) + assert "Could not convert unknown into a boolean" in str(e.value) + + +# MISC + + +def test_python_date_conversion() -> None: + one_day_str = "2022-03-28" + + from_str_lit = literal(one_day_str).to(DateType()) + + assert isinstance(from_str_lit, DateLiteral) # type: ignore + assert from_str_lit.value == 19079 # type: ignore + + +@pytest.mark.parametrize( + "lit, primitive_type", + [ + (literal(True), BooleanType()), + (literal(34), IntegerType()), + (literal(3400000000), LongType()), + (literal(34.11), FloatType()), + (literal(3.5028235e38), DoubleType()), + (literal(Decimal(34.55).quantize(Decimal("0.01"))), DecimalType(9, 2)), + (literal("2017-08-18"), DateType()), + (literal("14:21:01.919"), TimeType()), + (literal("2017-08-18T14:21:01.919"), TimestampType()), + (literal("abc"), StringType()), + (literal(uuid.uuid4()), UUIDType()), + (literal(bytes([0x01, 0x02, 0x03])), FixedType(3)), + ], +) +def test_identity_conversions(lit: Literal[Any], primitive_type: PrimitiveType) -> None: + expected = lit.to(primitive_type) + assert expected is expected.to(primitive_type) + + +def test_fixed_literal() -> None: + fixed_lit012 = literal(bytes([0x00, 0x01, 0x02])) + fixed_lit013 = literal(bytes([0x00, 0x01, 0x03])) + assert fixed_lit012 == fixed_lit012 + assert 
fixed_lit012 != fixed_lit013 + assert fixed_lit012 < fixed_lit013 + assert fixed_lit012 <= fixed_lit013 + assert fixed_lit013 > fixed_lit012 + assert fixed_lit013 >= fixed_lit012 + + +def test_binary_literal() -> None: + bin_lit012 = literal(bytes([0x00, 0x01, 0x02])) + bin_lit013 = literal(bytes([0x00, 0x01, 0x03])) + assert bin_lit012 == bin_lit012 + assert bin_lit012 != bin_lit013 + assert bin_lit012 < bin_lit013 + assert bin_lit012 <= bin_lit013 + assert bin_lit013 > bin_lit012 + assert bin_lit013 >= bin_lit012 + # None related + + +def test_raise_on_comparison_to_none() -> None: + bin_lit012 = literal(bytes([0x00, 0x01, 0x02])) + fixed_lit012 = literal(bytes([0x00, 0x01, 0x02])) + + with pytest.raises(AttributeError): + _ = bin_lit012 < None + + with pytest.raises(AttributeError): + _ = bin_lit012 <= None + + with pytest.raises(AttributeError): + _ = bin_lit012 > None + + with pytest.raises(AttributeError): + _ = bin_lit012 >= None + + with pytest.raises(AttributeError): + _ = fixed_lit012 < None + + with pytest.raises(AttributeError): + _ = fixed_lit012 <= None + + with pytest.raises(AttributeError): + _ = fixed_lit012 > None + + with pytest.raises(AttributeError): + _ = fixed_lit012 >= None + + +def test_binary_to_fixed() -> None: + lit = literal(bytes([0x00, 0x01, 0x02])) + fixed_lit = lit.to(FixedType(3)) + assert fixed_lit is not None + assert lit.value == fixed_lit.value + + with pytest.raises(TypeError) as e: + _ = lit.to(FixedType(4)) + assert "Cannot convert BinaryLiteral into fixed[4], different length: 4 <> 3" in str(e.value) + + +def test_binary_to_smaller_fixed_none() -> None: + lit = literal(bytes([0x00, 0x01, 0x02])) + + with pytest.raises(TypeError) as e: + _ = lit.to(FixedType(2)) + assert "Cannot convert BinaryLiteral into fixed[2], different length: 2 <> 3" in str(e.value) + + +def test_binary_to_uuid() -> None: + test_uuid = uuid.uuid4() + lit = literal(test_uuid.bytes) + uuid_lit = lit.to(UUIDType()) + assert uuid_lit is not None + assert lit.value == uuid_lit.value + assert uuid_lit.value == test_uuid.bytes + + +def test_incompatible_binary_to_uuid() -> None: + lit = literal(bytes([0x00, 0x01, 0x02])) + with pytest.raises(TypeError) as e: + _ = lit.to(UUIDType()) + assert "Cannot convert BinaryLiteral into uuid, different length: 16 <> 3" in str(e.value) + + +def test_fixed_to_binary() -> None: + lit = literal(bytes([0x00, 0x01, 0x02])).to(FixedType(3)) + binary_lit = lit.to(BinaryType()) + assert binary_lit is not None + assert lit.value == binary_lit.value + + +def test_fixed_to_smaller_fixed_none() -> None: + lit = literal(bytes([0x00, 0x01, 0x02])).to(FixedType(3)) + with pytest.raises(ValueError) as e: + lit.to(lit.to(FixedType(2))) + assert "Could not convert b'\\x00\\x01\\x02' into a fixed[2]" in str(e.value) + + +def test_fixed_to_uuid() -> None: + test_uuid = uuid.uuid4() + lit = literal(test_uuid.bytes).to(FixedType(16)) + uuid_lit = lit.to(UUIDType()) + assert uuid_lit is not None + assert lit.value == uuid_lit.value + assert uuid_lit.value == test_uuid.bytes + + +def test_incompatible_fixed_to_uuid() -> None: + lit = literal(bytes([0x00, 0x01, 0x02])).to(FixedType(3)) + with pytest.raises(TypeError) as e: + _ = lit.to(UUIDType()) + assert "Cannot convert BinaryLiteral into uuid, different length: 16 <> 3" in str(e.value) + + +def test_above_max_float() -> None: + a = FloatAboveMax() + # singleton + assert a == FloatAboveMax() + assert str(a) == "FloatAboveMax" + assert repr(a) == "FloatAboveMax()" + assert a.value == FloatType.max + assert a == 
eval(repr(a)) + assert a.to(FloatType()) == FloatAboveMax() + + +def test_below_min_float() -> None: + b = FloatBelowMin() + # singleton + assert b == FloatBelowMin() + assert str(b) == "FloatBelowMin" + assert repr(b) == "FloatBelowMin()" + assert b == eval(repr(b)) + assert b.value == FloatType.min + assert b.to(FloatType()) == FloatBelowMin() + + +def test_above_max_int() -> None: + a = IntAboveMax() + # singleton + assert a == IntAboveMax() + assert str(a) == "IntAboveMax" + assert repr(a) == "IntAboveMax()" + assert a.value == IntegerType.max + assert a == eval(repr(a)) + assert a.to(IntegerType()) == IntAboveMax() + + +def test_below_min_int() -> None: + b = IntBelowMin() + # singleton + assert b == IntBelowMin() + assert str(b) == "IntBelowMin" + assert repr(b) == "IntBelowMin()" + assert b == eval(repr(b)) + assert b.to(IntegerType()) == IntBelowMin() + + +def test_invalid_boolean_conversions() -> None: + assert_invalid_conversions( + literal(True), + [ + IntegerType(), + LongType(), + FloatType(), + DoubleType(), + DateType(), + TimeType(), + TimestampType(), + TimestamptzType(), + DecimalType(9, 2), + StringType(), + UUIDType(), + BinaryType(), + ], + ) + + +def test_invalid_long_conversions() -> None: + assert_invalid_conversions( + literal(34).to(LongType()), + [BooleanType(), StringType(), UUIDType(), FixedType(1), BinaryType()], + ) + + +@pytest.mark.parametrize( + "lit", + [ + literal(34.11).to(FloatType()), + # double + literal(34.11).to(DoubleType()), + ], +) +@pytest.mark.parametrize( + "test_type", + [ + BooleanType(), + IntegerType(), + LongType(), + DateType(), + TimeType(), + TimestampType(), + TimestamptzType(), + StringType(), + UUIDType(), + FixedType(1), + BinaryType(), + ], +) +def test_invalid_float_conversions(lit: Literal[Any], test_type: PrimitiveType) -> None: + with pytest.raises(TypeError): + _ = lit.to(test_type) + + +@pytest.mark.parametrize("lit", [literal("2017-08-18").to(DateType())]) +@pytest.mark.parametrize( + "test_type", + [ + BooleanType(), + IntegerType(), + LongType(), + FloatType(), + DoubleType(), + TimeType(), + TimestampType(), + TimestamptzType(), + DecimalType(9, 2), + StringType(), + UUIDType(), + FixedType(1), + BinaryType(), + ], +) +def test_invalid_datetime_conversions(lit: Literal[Any], test_type: PrimitiveType) -> None: + assert_invalid_conversions(lit, [test_type]) + + +def test_invalid_time_conversions() -> None: + assert_invalid_conversions( + literal("14:21:01.919").to(TimeType()), + [ + BooleanType(), + IntegerType(), + LongType(), + FloatType(), + DoubleType(), + DateType(), + TimestampType(), + TimestamptzType(), + DecimalType(9, 2), + StringType(), + UUIDType(), + FixedType(1), + BinaryType(), + ], + ) + + +def test_invalid_timestamp_conversions() -> None: + assert_invalid_conversions( + literal("2017-08-18T14:21:01.919").to(TimestampType()), + [ + BooleanType(), + IntegerType(), + LongType(), + FloatType(), + DoubleType(), + TimeType(), + DecimalType(9, 2), + StringType(), + UUIDType(), + FixedType(1), + BinaryType(), + ], + ) + + +def test_invalid_decimal_conversion_scale() -> None: + lit = literal(Decimal("34.11")) + with pytest.raises(ValueError) as e: + lit.to(DecimalType(9, 4)) + assert "Could not convert 34.11 into a decimal(9, 4)" in str(e.value) + + +def test_invalid_decimal_conversions() -> None: + assert_invalid_conversions( + literal(Decimal("34.11")), + [ + BooleanType(), + DateType(), + TimeType(), + TimestampType(), + TimestamptzType(), + StringType(), + UUIDType(), + FixedType(1), + BinaryType(), + ], + ) + 
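+# The remaining test_invalid_*_conversions cases delegate to the
+# assert_invalid_conversions helper defined below, which loops over the
+# target types and expects lit.to(type) to raise a TypeError for each one.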
+
+def test_invalid_string_conversions() -> None:
+    assert_invalid_conversions(
+        literal("abc"),
+        [FloatType(), DoubleType(), FixedType(1), BinaryType()],
+    )
+
+
+def test_invalid_uuid_conversions() -> None:
+    assert_invalid_conversions(
+        literal(uuid.uuid4()),
+        [
+            BooleanType(),
+            IntegerType(),
+            LongType(),
+            FloatType(),
+            DoubleType(),
+            DateType(),
+            TimeType(),
+            TimestampType(),
+            TimestamptzType(),
+            DecimalType(9, 2),
+            StringType(),
+            FixedType(1),
+            BinaryType(),
+        ],
+    )
+
+
+def test_invalid_fixed_conversions() -> None:
+    assert_invalid_conversions(
+        literal(bytes([0x00, 0x01, 0x02])).to(FixedType(3)),
+        [
+            BooleanType(),
+            IntegerType(),
+            LongType(),
+            FloatType(),
+            DoubleType(),
+            DateType(),
+            TimeType(),
+            TimestampType(),
+            TimestamptzType(),
+            DecimalType(9, 2),
+            StringType(),
+            UUIDType(),
+        ],
+    )
+
+
+def test_invalid_binary_conversions() -> None:
+    assert_invalid_conversions(
+        literal(bytes([0x00, 0x01, 0x02])),
+        [
+            BooleanType(),
+            IntegerType(),
+            LongType(),
+            FloatType(),
+            DoubleType(),
+            DateType(),
+            TimeType(),
+            TimestampType(),
+            TimestamptzType(),
+            DecimalType(9, 2),
+            StringType(),
+            UUIDType(),
+        ],
+    )
+
+
+def assert_invalid_conversions(lit: Literal[Any], types: List[PrimitiveType]) -> None:
+    for type_var in types:
+        with pytest.raises(TypeError):
+            _ = lit.to(type_var)
+
+
+def test_compare_floats() -> None:
+    lhs = literal(18.15).to(FloatType())
+    rhs = literal(19.25).to(FloatType())
+    assert lhs != rhs
+    assert lhs < rhs
+    assert lhs <= rhs
+    assert not lhs > rhs
+    assert not lhs >= rhs
+
+
+def test_string_to_int_max_value() -> None:
+    assert isinstance(literal(str(IntegerType.max + 1)).to(IntegerType()), IntAboveMax)
+
+
+def test_string_to_int_min_value() -> None:
+    assert isinstance(literal(str(IntegerType.min - 1)).to(IntegerType()), IntBelowMin)
+
+
+def test_string_to_integer_type_invalid_value() -> None:
+    with pytest.raises(ValueError) as e:
+        _ = literal("abc").to(IntegerType())
+    assert "Could not convert abc into a int" in str(e.value)
+
+
+def test_string_to_long_type_invalid_value() -> None:
+    with pytest.raises(ValueError) as e:
+        _ = literal("abc").to(LongType())
+    assert "Could not convert abc into a long" in str(e.value)
+
+
+def test_string_to_date_type_invalid_value() -> None:
+    with pytest.raises(ValueError) as e:
+        _ = literal("abc").to(DateType())
+    assert "Could not convert abc into a date" in str(e.value)
+
+
+def test_string_to_time_type_invalid_value() -> None:
+    with pytest.raises(ValueError) as e:
+        _ = literal("abc").to(TimeType())
+    assert "Could not convert abc into a time" in str(e.value)
+
+
+def test_string_to_decimal_type_invalid_value() -> None:
+    with pytest.raises(ValueError) as e:
+        _ = literal("18.15").to(DecimalType(10, 0))
+    assert "Could not convert 18.15 into a decimal(10, 0), scales differ 0 <> 2" in str(e.value)
+
+
+def test_decimal_literal_increment() -> None:
+    dec = DecimalLiteral(Decimal("10.123"))
+    # Twice to check that we don't mutate the value
+    assert dec.increment() == DecimalLiteral(Decimal("10.124"))
+    assert dec.increment() == DecimalLiteral(Decimal("10.124"))
+    # To check that the scale is still the same
+    assert dec.increment().value.as_tuple() == Decimal("10.124").as_tuple()
+
+
+def test_decimal_literal_decrement() -> None:
+    dec = DecimalLiteral(Decimal("10.123"))
+    # Twice to check that we don't mutate the value
+    assert dec.decrement() == DecimalLiteral(Decimal("10.122"))
+    assert dec.decrement() == DecimalLiteral(Decimal("10.122"))
+    # To check that the scale is
still the same + assert dec.decrement().value.as_tuple() == Decimal("10.122").as_tuple() + + +def test_uuid_literal_initialization() -> None: + test_uuid = uuid.UUID("f79c3e09-677c-4bbd-a479-3f349cb785e7") + uuid_literal = literal(test_uuid) + assert isinstance(uuid_literal, Literal) + assert test_uuid.bytes == uuid_literal.value + + +# __ __ ___ +# | \/ |_ _| _ \_ _ +# | |\/| | || | _/ || | +# |_| |_|\_, |_| \_, | +# |__/ |__/ + +assert_type(literal("str"), Literal[str]) +assert_type(literal(True), Literal[bool]) +assert_type(literal(123), Literal[int]) +assert_type(literal(123.4), Literal[float]) +assert_type(literal(bytes([0x01, 0x02, 0x03])), Literal[bytes]) +assert_type(literal(Decimal("19.25")), Literal[Decimal]) +assert_type({literal(1), literal(2), literal(3)}, Set[Literal[int]]) diff --git a/tests/expressions/test_parser.py b/tests/expressions/test_parser.py new file mode 100644 index 0000000000..f4bebca066 --- /dev/null +++ b/tests/expressions/test_parser.py @@ -0,0 +1,168 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
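+# These tests exercise the expression parser, which turns SQL-like predicate
+# strings into the equivalent expression tree. A comparison may be written
+# with the literal on either side: "'a' > foo" parses to LessThan("foo", "a").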
+import pytest
+from pyparsing import ParseException
+
+from pyiceberg.expressions import (
+    AlwaysFalse,
+    AlwaysTrue,
+    And,
+    EqualTo,
+    GreaterThan,
+    GreaterThanOrEqual,
+    In,
+    IsNaN,
+    IsNull,
+    LessThan,
+    LessThanOrEqual,
+    Not,
+    NotEqualTo,
+    NotIn,
+    NotNaN,
+    NotNull,
+    NotStartsWith,
+    Or,
+    StartsWith,
+    parser,
+)
+
+
+def test_true() -> None:
+    assert AlwaysTrue() == parser.parse("true")
+
+
+def test_false() -> None:
+    assert AlwaysFalse() == parser.parse("false")
+
+
+def test_is_null() -> None:
+    assert IsNull("foo") == parser.parse("foo is null")
+    assert IsNull("foo") == parser.parse("foo IS NULL")
+    assert IsNull("foo") == parser.parse("table.foo IS NULL")
+
+
+def test_not_null() -> None:
+    assert NotNull("foo") == parser.parse("foo is not null")
+    assert NotNull("foo") == parser.parse("foo IS NOT NULL")
+
+
+def test_is_nan() -> None:
+    assert IsNaN("foo") == parser.parse("foo is nan")
+    assert IsNaN("foo") == parser.parse("foo IS NAN")
+
+
+def test_not_nan() -> None:
+    assert NotNaN("foo") == parser.parse("foo is not nan")
+    assert NotNaN("foo") == parser.parse("foo IS NOT NaN")
+
+
+def test_less_than() -> None:
+    assert LessThan("foo", 5) == parser.parse("foo < 5")
+    assert LessThan("foo", "a") == parser.parse("'a' > foo")
+
+
+def test_less_than_or_equal() -> None:
+    assert LessThanOrEqual("foo", 5) == parser.parse("foo <= 5")
+    assert LessThanOrEqual("foo", "a") == parser.parse("'a' >= foo")
+
+
+def test_greater_than() -> None:
+    assert GreaterThan("foo", 5) == parser.parse("foo > 5")
+    assert GreaterThan("foo", "a") == parser.parse("'a' < foo")
+
+
+def test_greater_than_or_equal() -> None:
+    assert GreaterThanOrEqual("foo", 5) == parser.parse("foo >= 5")
+    assert GreaterThanOrEqual("foo", "a") == parser.parse("'a' <= foo")
+
+
+def test_equal_to() -> None:
+    assert EqualTo("foo", 5) == parser.parse("foo = 5")
+    assert EqualTo("foo", "a") == parser.parse("'a' = foo")
+    assert EqualTo("foo", "a") == parser.parse("foo == 'a'")
+    assert EqualTo("foo", 5) == parser.parse("5 == foo")
+
+
+def test_not_equal_to() -> None:
+    assert NotEqualTo("foo", 5) == parser.parse("foo != 5")
+    assert NotEqualTo("foo", "a") == parser.parse("'a' != foo")
+    assert NotEqualTo("foo", "a") == parser.parse("foo <> 'a'")
+    assert NotEqualTo("foo", 5) == parser.parse("5 <> foo")
+
+
+def test_in() -> None:
+    assert In("foo", {5, 6, 7}) == parser.parse("foo in (5, 6, 7)")
+    assert In("foo", {"a", "b", "c"}) == parser.parse("foo IN ('a', 'b', 'c')")
+
+
+def test_in_different_types() -> None:
+    with pytest.raises(ParseException):
+        parser.parse("foo in (5, 'a')")
+
+
+def test_not_in() -> None:
+    assert NotIn("foo", {5, 6, 7}) == parser.parse("foo not in (5, 6, 7)")
+    assert NotIn("foo", {"a", "b", "c"}) == parser.parse("foo NOT IN ('a', 'b', 'c')")
+
+
+def test_not_in_different_types() -> None:
+    with pytest.raises(ParseException):
+        parser.parse("foo not in (5, 'a')")
+
+
+def test_simple_and() -> None:
+    assert And(GreaterThanOrEqual("foo", 5), LessThan("foo", 10)) == parser.parse("5 <= foo and foo < 10")
+
+
+def test_and_with_not() -> None:
+    assert And(Not(GreaterThanOrEqual("foo", 5)), LessThan("foo", 10)) == parser.parse("not 5 <= foo and foo < 10")
+    assert And(GreaterThanOrEqual("foo", 5), Not(LessThan("foo", 10))) == parser.parse("5 <= foo and not foo < 10")
+
+
+def test_or_with_not() -> None:
+    assert Or(Not(LessThan("foo", 5)), GreaterThan("foo", 10)) == parser.parse("not foo < 5 or 10 < foo")
+    assert Or(LessThan("foo", 5), Not(GreaterThan("foo", 10))) ==
parser.parse("foo < 5 or not 10 < foo") + + +def test_simple_or() -> None: + assert Or(LessThan("foo", 5), GreaterThan("foo", 10)) == parser.parse("foo < 5 or 10 < foo") + + +def test_and_or_without_parens() -> None: + assert Or(And(NotNull("foo"), LessThan("foo", 5)), GreaterThan("foo", 10)) == parser.parse( + "foo is not null and foo < 5 or 10 < foo" + ) + assert Or(IsNull("foo"), And(GreaterThanOrEqual("foo", 5), LessThan("foo", 10))) == parser.parse( + "foo is null or 5 <= foo and foo < 10" + ) + + +def test_and_or_with_parens() -> None: + assert And(NotNull("foo"), Or(LessThan("foo", 5), GreaterThan("foo", 10))) == parser.parse( + "foo is not null and (foo < 5 or 10 < foo)" + ) + assert Or(IsNull("foo"), And(GreaterThanOrEqual("foo", 5), Not(LessThan("foo", 10)))) == parser.parse( + "(foo is null) or (5 <= foo) and not(foo < 10)" + ) + + +def test_starts_with() -> None: + assert StartsWith("foo", "data") == parser.parse("foo LIKE 'data'") + + +def test_not_starts_with() -> None: + assert NotStartsWith("foo", "data") == parser.parse("foo NOT LIKE 'data'") diff --git a/tests/expressions/test_projection.py b/tests/expressions/test_projection.py new file mode 100644 index 0000000000..4d0c2c1346 --- /dev/null +++ b/tests/expressions/test_projection.py @@ -0,0 +1,378 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
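+# These tests cover inclusive_projection: a row filter on source columns is
+# projected onto the partition columns of a spec. As the cases below show,
+# order-preserving transforms (identity, truncate, day, hour) can project
+# comparison predicates, while bucket can only project equality and IN;
+# whatever cannot be projected degrades to AlwaysTrue().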
+# pylint:disable=redefined-outer-name + +import pytest + +from pyiceberg.expressions import ( + AlwaysTrue, + And, + EqualTo, + GreaterThan, + GreaterThanOrEqual, + In, + IsNull, + LessThan, + LessThanOrEqual, + Not, + NotEqualTo, + NotIn, + NotNull, + Or, +) +from pyiceberg.expressions.visitors import inclusive_projection +from pyiceberg.partitioning import PartitionField, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.transforms import ( + BucketTransform, + DayTransform, + HourTransform, + IdentityTransform, + TruncateTransform, +) +from pyiceberg.types import ( + DateType, + LongType, + NestedField, + StringType, + TimestampType, +) + + +@pytest.fixture +def schema() -> Schema: + return Schema( + NestedField(1, "id", LongType(), required=False), + NestedField(2, "data", StringType(), required=False), + NestedField(3, "event_date", DateType(), required=False), + NestedField(4, "event_ts", TimestampType(), required=False), + ) + + +@pytest.fixture +def empty_spec() -> PartitionSpec: + return PartitionSpec() + + +@pytest.fixture +def id_spec() -> PartitionSpec: + return PartitionSpec(PartitionField(1, 1000, IdentityTransform(), "id_part")) + + +@pytest.fixture +def bucket_spec() -> PartitionSpec: + return PartitionSpec(PartitionField(2, 1000, BucketTransform(16), "data_bucket")) + + +@pytest.fixture +def day_spec() -> PartitionSpec: + return PartitionSpec(PartitionField(4, 1000, DayTransform(), "date"), PartitionField(3, 1000, DayTransform(), "ddate")) + + +@pytest.fixture +def hour_spec() -> PartitionSpec: + return PartitionSpec(PartitionField(4, 1000, HourTransform(), "hour")) + + +@pytest.fixture +def truncate_str_spec() -> PartitionSpec: + return PartitionSpec(PartitionField(2, 1000, TruncateTransform(2), "data_trunc")) + + +@pytest.fixture +def truncate_int_spec() -> PartitionSpec: + return PartitionSpec(PartitionField(1, 1000, TruncateTransform(10), "id_trunc")) + + +@pytest.fixture +def id_and_bucket_spec() -> PartitionSpec: + return PartitionSpec( + PartitionField(1, 1000, IdentityTransform(), "id_part"), PartitionField(2, 1001, BucketTransform(16), "data_bucket") + ) + + +def test_identity_projection(schema: Schema, id_spec: PartitionSpec) -> None: + predicates = [ + NotNull("id"), + IsNull("id"), + LessThan("id", 100), + LessThanOrEqual("id", 101), + GreaterThan("id", 102), + GreaterThanOrEqual("id", 103), + EqualTo("id", 104), + NotEqualTo("id", 105), + In("id", {3, 4, 5}), + NotIn("id", {3, 4, 5}), + ] + + expected = [ + NotNull("id_part"), + IsNull("id_part"), + LessThan("id_part", 100), + LessThanOrEqual("id_part", 101), + GreaterThan("id_part", 102), + GreaterThanOrEqual("id_part", 103), + EqualTo("id_part", 104), + NotEqualTo("id_part", 105), + In("id_part", {3, 4, 5}), + NotIn("id_part", {3, 4, 5}), + ] + + project = inclusive_projection(schema, id_spec) + for index, predicate in enumerate(predicates): + expr = project(predicate) + assert expected[index] == expr + + +def test_bucket_projection(schema: Schema, bucket_spec: PartitionSpec) -> None: + predicates = [ + NotNull("data"), + IsNull("data"), + LessThan("data", "val"), + LessThanOrEqual("data", "val"), + GreaterThan("data", "val"), + GreaterThanOrEqual("data", "val"), + EqualTo("data", "val"), + NotEqualTo("data", "val"), + In("data", {"v1", "v2", "v3"}), + NotIn("data", {"v1", "v2", "v3"}), + ] + + expected = [ + NotNull("data_bucket"), + IsNull("data_bucket"), + AlwaysTrue(), + AlwaysTrue(), + AlwaysTrue(), + AlwaysTrue(), + EqualTo("data_bucket", 14), + AlwaysTrue(), + In("data_bucket", {1, 
3, 13}), + AlwaysTrue(), + ] + + project = inclusive_projection(schema, bucket_spec) + for index, predicate in enumerate(predicates): + expr = project(predicate) + assert expected[index] == expr + + +def test_hour_projection(schema: Schema, hour_spec: PartitionSpec) -> None: + predicates = [ + NotNull("event_ts"), + IsNull("event_ts"), + LessThan("event_ts", "2022-11-27T10:00:00"), + LessThanOrEqual("event_ts", "2022-11-27T10:00:00"), + GreaterThan("event_ts", "2022-11-27T09:59:59.999999"), + GreaterThanOrEqual("event_ts", "2022-11-27T09:59:59.999999"), + EqualTo("event_ts", "2022-11-27T10:00:00"), + NotEqualTo("event_ts", "2022-11-27T10:00:00"), + In("event_ts", {"2022-11-27T10:00:00", "2022-11-27T09:59:59.999999"}), + NotIn("event_ts", {"2022-11-27T10:00:00", "2022-11-27T09:59:59.999999"}), + ] + + expected = [ + NotNull("hour"), + IsNull("hour"), + LessThanOrEqual("hour", 463761), + LessThanOrEqual("hour", 463762), + GreaterThanOrEqual("hour", 463762), + GreaterThanOrEqual("hour", 463761), + EqualTo("hour", 463762), + AlwaysTrue(), + In("hour", {463761, 463762}), + AlwaysTrue(), + ] + + project = inclusive_projection(schema, hour_spec) + for index, predicate in enumerate(predicates): + expr = project(predicate) + assert expected[index] == expr, predicate + + +def test_day_projection(schema: Schema, day_spec: PartitionSpec) -> None: + predicates = [ + NotNull("event_ts"), + IsNull("event_ts"), + LessThan("event_ts", "2022-11-27T00:00:00"), + LessThanOrEqual("event_ts", "2022-11-27T00:00:00"), + GreaterThan("event_ts", "2022-11-26T23:59:59.999999"), + GreaterThanOrEqual("event_ts", "2022-11-26T23:59:59.999999"), + EqualTo("event_ts", "2022-11-27T10:00:00"), + NotEqualTo("event_ts", "2022-11-27T10:00:00"), + In("event_ts", {"2022-11-27T00:00:00", "2022-11-26T23:59:59.999999"}), + NotIn("event_ts", {"2022-11-27T00:00:00", "2022-11-26T23:59:59.999999"}), + ] + + expected = [ + NotNull("date"), + IsNull("date"), + LessThanOrEqual("date", 19322), + LessThanOrEqual("date", 19323), + GreaterThanOrEqual("date", 19323), + GreaterThanOrEqual("date", 19322), + EqualTo("date", 19323), + AlwaysTrue(), + In("date", {19322, 19323}), + AlwaysTrue(), + ] + + project = inclusive_projection(schema, day_spec) + for index, predicate in enumerate(predicates): + expr = project(predicate) + assert expected[index] == expr, predicate + + +def test_date_day_projection(schema: Schema, day_spec: PartitionSpec) -> None: + predicates = [ + NotNull("event_date"), + IsNull("event_date"), + LessThan("event_date", "2022-11-27"), + LessThanOrEqual("event_date", "2022-11-27"), + GreaterThan("event_date", "2022-11-26"), + GreaterThanOrEqual("event_date", "2022-11-26"), + EqualTo("event_date", "2022-11-27"), + NotEqualTo("event_date", "2022-11-27"), + In("event_date", {"2022-11-26", "2022-11-27"}), + NotIn("event_date", {"2022-11-26", "2022-11-27"}), + ] + + expected = [ + NotNull("ddate"), + IsNull("ddate"), + LessThanOrEqual("ddate", 19322), + LessThanOrEqual("ddate", 19323), + GreaterThanOrEqual("ddate", 19323), + GreaterThanOrEqual("ddate", 19322), + EqualTo("ddate", 19323), + AlwaysTrue(), + In("ddate", {19322, 19323}), + AlwaysTrue(), + ] + + project = inclusive_projection(schema, day_spec) + for index, predicate in enumerate(predicates): + expr = project(predicate) + assert expected[index] == expr, predicate + + +def test_string_truncate_projection(schema: Schema, truncate_str_spec: PartitionSpec) -> None: + predicates = [ + NotNull("data"), + IsNull("data"), + LessThan("data", "aaa"), + LessThanOrEqual("data", "aaa"), 
+ GreaterThan("data", "aaa"), + GreaterThanOrEqual("data", "aaa"), + EqualTo("data", "aaa"), + NotEqualTo("data", "aaa"), + In("data", {"aaa", "aab"}), + NotIn("data", {"aaa", "aab"}), + ] + + expected = [ + NotNull("data_trunc"), + IsNull("data_trunc"), + LessThanOrEqual("data_trunc", "aa"), + LessThanOrEqual("data_trunc", "aa"), + GreaterThanOrEqual("data_trunc", "aa"), + GreaterThanOrEqual("data_trunc", "aa"), + EqualTo("data_trunc", "aa"), + AlwaysTrue(), + EqualTo("data_trunc", "aa"), + AlwaysTrue(), + ] + + project = inclusive_projection(schema, truncate_str_spec) + for index, predicate in enumerate(predicates): + expr = project(predicate) + assert expected[index] == expr, predicate + + +def test_int_truncate_projection(schema: Schema, truncate_int_spec: PartitionSpec) -> None: + predicates = [ + NotNull("id"), + IsNull("id"), + LessThan("id", 10), + LessThanOrEqual("id", 10), + GreaterThan("id", 9), + GreaterThanOrEqual("id", 10), + EqualTo("id", 15), + NotEqualTo("id", 15), + In("id", {15, 16}), + NotIn("id", {15, 16}), + ] + + expected = [ + NotNull("id_trunc"), + IsNull("id_trunc"), + LessThanOrEqual("id_trunc", 0), + LessThanOrEqual("id_trunc", 10), + GreaterThanOrEqual("id_trunc", 10), + GreaterThanOrEqual("id_trunc", 10), + EqualTo("id_trunc", 10), + AlwaysTrue(), + EqualTo("id_trunc", 10), + AlwaysTrue(), + ] + + project = inclusive_projection(schema, truncate_int_spec) + for index, predicate in enumerate(predicates): + expr = project(predicate) + assert expected[index] == expr, predicate + + +def test_projection_case_sensitive(schema: Schema, id_spec: PartitionSpec) -> None: + project = inclusive_projection(schema, id_spec) + with pytest.raises(ValueError) as exc_info: + project(NotNull("ID")) + assert str(exc_info) == "Could not find field with name ID, case_sensitive=True" + + +def test_projection_case_insensitive(schema: Schema, id_spec: PartitionSpec) -> None: + project = inclusive_projection(schema, id_spec, case_sensitive=False) + assert NotNull("id_part") == project(NotNull("ID")) + + +def test_projection_empty_spec(schema: Schema, empty_spec: PartitionSpec) -> None: + project = inclusive_projection(schema, empty_spec) + assert AlwaysTrue() == project(And(LessThan("id", 5), NotNull("data"))) + + +def test_and_projection_multiple_projected_fields(schema: Schema, id_and_bucket_spec: PartitionSpec) -> None: + project = inclusive_projection(schema, id_and_bucket_spec) + assert project(And(LessThan("id", 5), In("data", {"a", "b", "c"}))) == And( + LessThan("id_part", 5), In("data_bucket", {2, 3, 15}) + ) + + +def test_or_projection_multiple_projected_fields(schema: Schema, id_and_bucket_spec: PartitionSpec) -> None: + project = inclusive_projection(schema, id_and_bucket_spec) + assert project(Or(LessThan("id", 5), In("data", {"a", "b", "c"}))) == Or( + LessThan("id_part", 5), In("data_bucket", {2, 3, 15}) + ) + + +def test_not_projection_multiple_projected_fields(schema: Schema, id_and_bucket_spec: PartitionSpec) -> None: + project = inclusive_projection(schema, id_and_bucket_spec) + # Not causes In to be rewritten to NotIn, which cannot be projected + assert project(Not(Or(LessThan("id", 5), In("data", {"a", "b", "c"})))) == GreaterThanOrEqual("id_part", 5) + + +def test_projection_partial_projected_fields(schema: Schema, id_spec: PartitionSpec) -> None: + project = inclusive_projection(schema, id_spec) + assert project(And(LessThan("id", 5), In("data", {"a", "b", "c"}))) == LessThan("id_part", 5) diff --git a/tests/expressions/test_visitors.py 
b/tests/expressions/test_visitors.py new file mode 100644 index 0000000000..50db90ceac --- /dev/null +++ b/tests/expressions/test_visitors.py @@ -0,0 +1,1631 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint:disable=redefined-outer-name + +from typing import Any, List, Set + +import pytest + +from pyiceberg.conversions import to_bytes +from pyiceberg.expressions import ( + AlwaysFalse, + AlwaysTrue, + And, + BooleanExpression, + BoundEqualTo, + BoundGreaterThan, + BoundGreaterThanOrEqual, + BoundIn, + BoundIsNaN, + BoundIsNull, + BoundLessThan, + BoundLessThanOrEqual, + BoundNotEqualTo, + BoundNotIn, + BoundNotNaN, + BoundNotNull, + BoundNotStartsWith, + BoundPredicate, + BoundReference, + BoundStartsWith, + BoundTerm, + EqualTo, + GreaterThan, + GreaterThanOrEqual, + In, + IsNaN, + IsNull, + LessThan, + LessThanOrEqual, + Not, + NotEqualTo, + NotIn, + NotNaN, + NotNull, + NotStartsWith, + Or, + Reference, + StartsWith, + UnboundPredicate, +) +from pyiceberg.expressions.literals import Literal, literal +from pyiceberg.expressions.visitors import ( + BindVisitor, + BooleanExpressionVisitor, + BoundBooleanExpressionVisitor, + _ManifestEvalVisitor, + expression_evaluator, + expression_to_plain_format, + rewrite_not, + rewrite_to_dnf, + visit, + visit_bound_predicate, +) +from pyiceberg.manifest import ManifestFile, PartitionFieldSummary +from pyiceberg.schema import Accessor, Schema +from pyiceberg.typedef import Record +from pyiceberg.types import ( + DoubleType, + FloatType, + IcebergType, + IntegerType, + NestedField, + PrimitiveType, + StringType, +) + + +class ExampleVisitor(BooleanExpressionVisitor[List[str]]): + """A test implementation of a BooleanExpressionVisitor + + As this visitor visits each node, it appends an element to a `visit_history` list. This enables testing that a given expression is + visited in an expected order by the `visit` method. 
+ """ + + def __init__(self) -> None: + self.visit_history: List[str] = [] + + def visit_true(self) -> List[str]: + self.visit_history.append("TRUE") + return self.visit_history + + def visit_false(self) -> List[str]: + self.visit_history.append("FALSE") + return self.visit_history + + def visit_not(self, child_result: List[str]) -> List[str]: + self.visit_history.append("NOT") + return self.visit_history + + def visit_and(self, left_result: List[str], right_result: List[str]) -> List[str]: + self.visit_history.append("AND") + return self.visit_history + + def visit_or(self, left_result: List[str], right_result: List[str]) -> List[str]: + self.visit_history.append("OR") + return self.visit_history + + def visit_unbound_predicate(self, predicate: UnboundPredicate[Any]) -> List[str]: + self.visit_history.append(str(predicate.__class__.__name__).upper()) + return self.visit_history + + def visit_bound_predicate(self, predicate: BoundPredicate[Any]) -> List[str]: + self.visit_history.append(str(predicate.__class__.__name__).upper()) + return self.visit_history + + +class FooBoundBooleanExpressionVisitor(BoundBooleanExpressionVisitor[List[str]]): + """A test implementation of a BoundBooleanExpressionVisitor + As this visitor visits each node, it appends an element to a `visit_history` list. This enables testing that a given bound expression is + visited in an expected order by the `visit` method. + """ + + def __init__(self) -> None: + self.visit_history: List[str] = [] + + def visit_in(self, term: BoundTerm[Any], literals: Set[Any]) -> List[str]: + self.visit_history.append("IN") + return self.visit_history + + def visit_not_in(self, term: BoundTerm[Any], literals: Set[Any]) -> List[str]: + self.visit_history.append("NOT_IN") + return self.visit_history + + def visit_is_nan(self, term: BoundTerm[Any]) -> List[str]: + self.visit_history.append("IS_NAN") + return self.visit_history + + def visit_not_nan(self, term: BoundTerm[Any]) -> List[str]: + self.visit_history.append("NOT_NAN") + return self.visit_history + + def visit_is_null(self, term: BoundTerm[Any]) -> List[str]: + self.visit_history.append("IS_NULL") + return self.visit_history + + def visit_not_null(self, term: BoundTerm[Any]) -> List[str]: + self.visit_history.append("NOT_NULL") + return self.visit_history + + def visit_equal(self, term: BoundTerm[Any], literal: Literal[Any]) -> List[str]: # pylint: disable=redefined-outer-name + self.visit_history.append("EQUAL") + return self.visit_history + + def visit_not_equal(self, term: BoundTerm[Any], literal: Literal[Any]) -> List[str]: # pylint: disable=redefined-outer-name + self.visit_history.append("NOT_EQUAL") + return self.visit_history + + def visit_greater_than_or_equal( + self, term: BoundTerm[Any], literal: Literal[Any] + ) -> List[str]: # pylint: disable=redefined-outer-name + self.visit_history.append("GREATER_THAN_OR_EQUAL") + return self.visit_history + + def visit_greater_than( + self, term: BoundTerm[Any], literal: Literal[Any] + ) -> List[str]: # pylint: disable=redefined-outer-name + self.visit_history.append("GREATER_THAN") + return self.visit_history + + def visit_less_than(self, term: BoundTerm[Any], literal: Literal[Any]) -> List[str]: # pylint: disable=redefined-outer-name + self.visit_history.append("LESS_THAN") + return self.visit_history + + def visit_less_than_or_equal( + self, term: BoundTerm[Any], literal: Literal[Any] + ) -> List[str]: # pylint: disable=redefined-outer-name + self.visit_history.append("LESS_THAN_OR_EQUAL") + return self.visit_history + + def 
visit_true(self) -> List[str]: + self.visit_history.append("TRUE") + return self.visit_history + + def visit_false(self) -> List[str]: + self.visit_history.append("FALSE") + return self.visit_history + + def visit_not(self, child_result: List[str]) -> List[str]: + self.visit_history.append("NOT") + return self.visit_history + + def visit_and(self, left_result: List[str], right_result: List[str]) -> List[str]: + self.visit_history.append("AND") + return self.visit_history + + def visit_or(self, left_result: List[str], right_result: List[str]) -> List[str]: + self.visit_history.append("OR") + return self.visit_history + + def visit_starts_with(self, term: BoundTerm[Any], literal: Literal[Any]) -> List[str]: + self.visit_history.append("STARTS_WITH") + return self.visit_history + + def visit_not_starts_with(self, term: BoundTerm[Any], literal: Literal[Any]) -> List[str]: + self.visit_history.append("NOT_STARTS_WITH") + return self.visit_history + + +def test_boolean_expression_visitor() -> None: + """Test post-order traversal of boolean expression visit method""" + expr = And( + Or(Not(EqualTo("a", 1)), Not(NotEqualTo("b", 0)), EqualTo("a", 1), NotEqualTo("b", 0)), + Not(EqualTo("a", 1)), + NotEqualTo("b", 0), + ) + visitor = ExampleVisitor() + result = visit(expr, visitor=visitor) + assert result == [ + "EQUALTO", + "NOT", + "NOTEQUALTO", + "NOT", + "OR", + "EQUALTO", + "OR", + "NOTEQUALTO", + "OR", + "EQUALTO", + "NOT", + "AND", + "NOTEQUALTO", + "AND", + ] + + +def test_boolean_expression_visit_raise_not_implemented_error() -> None: + """Test raise NotImplementedError when visiting an unsupported object type""" + visitor = ExampleVisitor() + with pytest.raises(NotImplementedError) as exc_info: + visit("foo", visitor=visitor) # type: ignore + + assert str(exc_info.value) == "Cannot visit unsupported expression: foo" + + +def test_bind_visitor_already_bound(table_schema_simple: Schema) -> None: + bound = BoundEqualTo[str]( + term=BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), + literal=literal("hello"), + ) + with pytest.raises(TypeError) as exc_info: + visit(bound, visitor=BindVisitor(schema=table_schema_simple, case_sensitive=True)) + assert ( + "Found already bound predicate: BoundEqualTo(term=BoundReference(field=NestedField(field_id=1, name='foo', field_type=StringType(), required=False), accessor=Accessor(position=0,inner=None)), literal=literal('hello'))" + == str(exc_info.value) + ) + + +def test_visit_bound_visitor_unknown_predicate() -> None: + with pytest.raises(TypeError) as exc_info: + visit_bound_predicate({"something"}, FooBoundBooleanExpressionVisitor()) # type: ignore + assert "Unknown predicate: {'something'}" == str(exc_info.value) + + +def test_always_true_expression_binding(table_schema_simple: Schema) -> None: + """Test that visiting an always-true expression returns always-true""" + unbound_expression = AlwaysTrue() + bound_expression = visit(unbound_expression, visitor=BindVisitor(schema=table_schema_simple, case_sensitive=True)) + assert bound_expression == AlwaysTrue() + + +def test_always_false_expression_binding(table_schema_simple: Schema) -> None: + """Test that visiting an always-false expression returns always-false""" + unbound_expression = AlwaysFalse() + bound_expression = visit(unbound_expression, visitor=BindVisitor(schema=table_schema_simple, case_sensitive=True)) + assert bound_expression == AlwaysFalse() + + +def test_always_false_and_always_true_expression_binding(table_schema_simple: Schema) -> None: + 
"""Test that visiting both an always-true AND always-false expression returns always-false""" + unbound_expression = And(AlwaysTrue(), AlwaysFalse()) + bound_expression = visit(unbound_expression, visitor=BindVisitor(schema=table_schema_simple, case_sensitive=True)) + assert bound_expression == AlwaysFalse() + + +def test_always_false_or_always_true_expression_binding(table_schema_simple: Schema) -> None: + """Test that visiting always-true OR always-false expression returns always-true""" + unbound_expression = Or(AlwaysTrue(), AlwaysFalse()) + bound_expression = visit(unbound_expression, visitor=BindVisitor(schema=table_schema_simple, case_sensitive=True)) + assert bound_expression == AlwaysTrue() + + +@pytest.mark.parametrize( + "unbound_and_expression,expected_bound_expression", + [ + ( + And( + In(Reference("foo"), {"foo", "bar"}), + In(Reference("bar"), {1, 2, 3}), + ), + And( + BoundIn( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + {literal("foo"), literal("bar")}, + ), + BoundIn[int]( + BoundReference( + field=NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + accessor=Accessor(position=1, inner=None), + ), + {literal(1), literal(2), literal(3)}, + ), + ), + ), + ( + And( + In(Reference("foo"), ("bar", "baz")), + In( + Reference("bar"), + (1,), + ), + In( + Reference("foo"), + ("baz",), + ), + ), + And( + And( + BoundIn( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + {literal("bar"), literal("baz")}, + ), + BoundEqualTo[int]( + BoundReference( + field=NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + accessor=Accessor(position=1, inner=None), + ), + literal(1), + ), + ), + BoundEqualTo( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal("baz"), + ), + ), + ), + ], +) +def test_and_expression_binding( + unbound_and_expression: UnboundPredicate[Any], expected_bound_expression: BoundPredicate[Any], table_schema_simple: Schema +) -> None: + """Test that visiting an unbound AND expression with a bind-visitor returns the expected bound expression""" + bound_expression = visit(unbound_and_expression, visitor=BindVisitor(schema=table_schema_simple, case_sensitive=True)) + assert bound_expression == expected_bound_expression + + +@pytest.mark.parametrize( + "unbound_or_expression,expected_bound_expression", + [ + ( + Or( + In(Reference("foo"), ("foo", "bar")), + In(Reference("bar"), (1, 2, 3)), + ), + Or( + BoundIn( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + {literal("foo"), literal("bar")}, + ), + BoundIn[int]( + BoundReference( + field=NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + accessor=Accessor(position=1, inner=None), + ), + {literal(1), literal(2), literal(3)}, + ), + ), + ), + ( + Or( + In(Reference("foo"), ("bar", "baz")), + In( + Reference("foo"), + ("bar",), + ), + In( + Reference("foo"), + ("baz",), + ), + ), + Or( + Or( + BoundIn( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + {literal("bar"), literal("baz")}, + ), + BoundIn( + 
BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + {literal("bar")}, + ), + ), + BoundIn( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + {literal("baz")}, + ), + ), + ), + ( + Or( + AlwaysTrue(), + AlwaysFalse(), + ), + AlwaysTrue(), + ), + ( + Or( + AlwaysTrue(), + AlwaysTrue(), + ), + AlwaysTrue(), + ), + ( + Or( + AlwaysFalse(), + AlwaysFalse(), + ), + AlwaysFalse(), + ), + ], +) +def test_or_expression_binding( + unbound_or_expression: UnboundPredicate[Any], expected_bound_expression: BoundPredicate[Any], table_schema_simple: Schema +) -> None: + """Test that visiting an unbound OR expression with a bind-visitor returns the expected bound expression""" + bound_expression = visit(unbound_or_expression, visitor=BindVisitor(schema=table_schema_simple, case_sensitive=True)) + assert bound_expression == expected_bound_expression + + +@pytest.mark.parametrize( + "unbound_in_expression,expected_bound_expression", + [ + ( + In(Reference("foo"), ("foo", "bar")), + BoundIn( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + {literal("foo"), literal("bar")}, + ), + ), + ( + In(Reference("foo"), ("bar", "baz")), + BoundIn( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + {literal("bar"), literal("baz")}, + ), + ), + ( + In( + Reference("foo"), + ("bar",), + ), + BoundEqualTo( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal("bar"), + ), + ), + ], +) +def test_in_expression_binding( + unbound_in_expression: UnboundPredicate[Any], expected_bound_expression: BoundPredicate[Any], table_schema_simple: Schema +) -> None: + """Test that visiting an unbound IN expression with a bind-visitor returns the expected bound expression""" + bound_expression = visit(unbound_in_expression, visitor=BindVisitor(schema=table_schema_simple, case_sensitive=True)) + assert bound_expression == expected_bound_expression + + +@pytest.mark.parametrize( + "unbound_not_expression,expected_bound_expression", + [ + ( + Not(In(Reference("foo"), ("foo", "bar"))), + Not( + BoundIn( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + {literal("foo"), literal("bar")}, + ) + ), + ), + ( + Not( + Or( + In(Reference("foo"), ("foo", "bar")), + In(Reference("foo"), ("foo", "bar", "baz")), + ) + ), + Not( + Or( + BoundIn( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + {literal("foo"), literal("bar")}, + ), + BoundIn( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + {literal("foo"), literal("bar"), literal("baz")}, + ), + ), + ), + ), + ], +) +def test_not_expression_binding( + unbound_not_expression: UnboundPredicate[Any], expected_bound_expression: BoundPredicate[Any], table_schema_simple: Schema +) -> None: + """Test that visiting an unbound NOT expression with a bind-visitor returns the 
expected bound expression""" + bound_expression = visit(unbound_not_expression, visitor=BindVisitor(schema=table_schema_simple, case_sensitive=True)) + assert bound_expression == expected_bound_expression + + +def test_bound_boolean_expression_visitor_and_in() -> None: + """Test visiting an And and In expression with a bound boolean expression visitor""" + bound_expression = And( + BoundIn( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literals={literal("foo"), literal("bar")}, + ), + BoundIn( + term=BoundReference( + field=NestedField(field_id=2, name="bar", field_type=StringType(), required=False), + accessor=Accessor(position=1, inner=None), + ), + literals={literal("baz"), literal("qux")}, + ), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["IN", "IN", "AND"] + + +def test_bound_boolean_expression_visitor_or() -> None: + """Test visiting an Or expression with a bound boolean expression visitor""" + bound_expression = Or( + Not( + BoundIn( + BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + {literal("foo"), literal("bar")}, + ) + ), + Not( + BoundIn( + BoundReference( + field=NestedField(field_id=2, name="bar", field_type=StringType(), required=False), + accessor=Accessor(position=1, inner=None), + ), + {literal("baz"), literal("qux")}, + ) + ), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["IN", "NOT", "IN", "NOT", "OR"] + + +def test_bound_boolean_expression_visitor_equal() -> None: + bound_expression = BoundEqualTo( + term=BoundReference( + field=NestedField(field_id=2, name="bar", field_type=StringType(), required=False), + accessor=Accessor(position=1, inner=None), + ), + literal=literal("foo"), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["EQUAL"] + + +def test_bound_boolean_expression_visitor_not_equal() -> None: + bound_expression = BoundNotEqualTo( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("foo"), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["NOT_EQUAL"] + + +def test_bound_boolean_expression_visitor_always_true() -> None: + bound_expression = AlwaysTrue() + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["TRUE"] + + +def test_bound_boolean_expression_visitor_always_false() -> None: + bound_expression = AlwaysFalse() + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["FALSE"] + + +def test_bound_boolean_expression_visitor_in() -> None: + bound_expression = BoundIn( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literals={literal("foo"), literal("bar")}, + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["IN"] + + +def test_bound_boolean_expression_visitor_not_in() -> None: + bound_expression = BoundNotIn( + 
term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literals={literal("foo"), literal("bar")}, + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["NOT_IN"] + + +def test_bound_boolean_expression_visitor_is_nan() -> None: + bound_expression = BoundIsNaN( + term=BoundReference( + field=NestedField(field_id=3, name="baz", field_type=FloatType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["IS_NAN"] + + +def test_bound_boolean_expression_visitor_not_nan() -> None: + bound_expression = BoundNotNaN( + term=BoundReference( + field=NestedField(field_id=3, name="baz", field_type=FloatType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["NOT_NAN"] + + +def test_bound_boolean_expression_visitor_is_null() -> None: + bound_expression = BoundIsNull( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["IS_NULL"] + + +def test_bound_boolean_expression_visitor_not_null() -> None: + bound_expression = BoundNotNull( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["NOT_NULL"] + + +def test_bound_boolean_expression_visitor_greater_than() -> None: + bound_expression = BoundGreaterThan( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("foo"), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["GREATER_THAN"] + + +def test_bound_boolean_expression_visitor_greater_than_or_equal() -> None: + bound_expression = BoundGreaterThanOrEqual( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("foo"), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["GREATER_THAN_OR_EQUAL"] + + +def test_bound_boolean_expression_visitor_less_than() -> None: + bound_expression = BoundLessThan( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("foo"), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["LESS_THAN"] + + +def test_bound_boolean_expression_visitor_less_than_or_equal() -> None: + bound_expression = BoundLessThanOrEqual( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("foo"), + ) + visitor = 
FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["LESS_THAN_OR_EQUAL"] + + +def test_bound_boolean_expression_visitor_raise_on_unbound_predicate() -> None: + bound_expression = LessThanOrEqual( + term=Reference("foo"), + literal="foo", + ) + visitor = FooBoundBooleanExpressionVisitor() + with pytest.raises(TypeError) as exc_info: + visit(bound_expression, visitor=visitor) + assert "Not a bound predicate" in str(exc_info.value) + + +def test_bound_boolean_expression_visitor_starts_with() -> None: + bound_expression = BoundStartsWith( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("foo"), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["STARTS_WITH"] + + +def test_bound_boolean_expression_visitor_not_starts_with() -> None: + bound_expression = BoundNotStartsWith( + term=BoundReference( + field=NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + accessor=Accessor(position=0, inner=None), + ), + literal=literal("foo"), + ) + visitor = FooBoundBooleanExpressionVisitor() + result = visit(bound_expression, visitor=visitor) + assert result == ["NOT_STARTS_WITH"] + + +def _to_byte_buffer(field_type: IcebergType, val: Any) -> bytes: + if not isinstance(field_type, PrimitiveType): + raise ValueError(f"Expected a PrimitiveType, got: {type(field_type)}") + return to_bytes(field_type, val) + + +def _to_manifest_file(*partitions: PartitionFieldSummary) -> ManifestFile: + """Helper to create a ManifestFile""" + return ManifestFile(manifest_path="", manifest_length=0, partition_spec_id=0, partitions=partitions) + + +INT_MIN_VALUE = 30 +INT_MAX_VALUE = 79 + +INT_MIN = _to_byte_buffer(IntegerType(), INT_MIN_VALUE) +INT_MAX = _to_byte_buffer(IntegerType(), INT_MAX_VALUE) + +STRING_MIN = _to_byte_buffer(StringType(), "a") +STRING_MAX = _to_byte_buffer(StringType(), "z") + + +@pytest.fixture +def schema() -> Schema: + return Schema( + NestedField(1, "id", IntegerType(), required=True), + NestedField(2, "all_nulls_missing_nan", StringType(), required=False), + NestedField(3, "some_nulls", StringType(), required=False), + NestedField(4, "no_nulls", StringType(), required=False), + NestedField(5, "float", FloatType(), required=False), + NestedField(6, "all_nulls_double", DoubleType(), required=False), + NestedField(7, "all_nulls_no_nans", FloatType(), required=False), + NestedField(8, "all_nans", DoubleType(), required=False), + NestedField(9, "both_nan_and_null", FloatType(), required=False), + NestedField(10, "no_nan_or_null", DoubleType(), required=False), + NestedField(11, "all_nulls_missing_nan_float", FloatType(), required=False), + NestedField(12, "all_same_value_or_null", StringType(), required=False), + NestedField(13, "no_nulls_same_value_a", StringType(), required=False), + ) + + +@pytest.fixture +def manifest_no_stats() -> ManifestFile: + return _to_manifest_file() + + +@pytest.fixture +def manifest() -> ManifestFile: + return _to_manifest_file( + # id + PartitionFieldSummary( + contains_null=False, + contains_nan=None, + lower_bound=INT_MIN, + upper_bound=INT_MAX, + ), + # all_nulls_missing_nan + PartitionFieldSummary( + contains_null=True, + contains_nan=None, + lower_bound=None, + upper_bound=None, + ), + # some_nulls + PartitionFieldSummary( + contains_null=True, + contains_nan=None, + lower_bound=STRING_MIN, + 
upper_bound=STRING_MAX, + ), + # no_nulls + PartitionFieldSummary( + contains_null=False, + contains_nan=None, + lower_bound=STRING_MIN, + upper_bound=STRING_MAX, + ), + # float + PartitionFieldSummary( + contains_null=True, + contains_nan=None, + lower_bound=_to_byte_buffer(FloatType(), 0.0), + upper_bound=_to_byte_buffer(FloatType(), 20.0), + ), + # all_nulls_double + PartitionFieldSummary(contains_null=True, contains_nan=None, lower_bound=None, upper_bound=None), + # all_nulls_no_nans + PartitionFieldSummary( + contains_null=True, + contains_nan=False, + lower_bound=None, + upper_bound=None, + ), + # all_nans + PartitionFieldSummary( + contains_null=False, + contains_nan=True, + lower_bound=None, + upper_bound=None, + ), + # both_nan_and_null + PartitionFieldSummary( + contains_null=True, + contains_nan=True, + lower_bound=None, + upper_bound=None, + ), + # no_nan_or_null + PartitionFieldSummary( + contains_null=False, + contains_nan=False, + lower_bound=_to_byte_buffer(FloatType(), 0.0), + upper_bound=_to_byte_buffer(FloatType(), 20.0), + ), + # all_nulls_missing_nan_float + PartitionFieldSummary(contains_null=True, contains_nan=None, lower_bound=None, upper_bound=None), + # all_same_value_or_null + PartitionFieldSummary( + contains_null=True, + contains_nan=None, + lower_bound=STRING_MIN, + upper_bound=STRING_MIN, + ), + # no_nulls_same_value_a + PartitionFieldSummary( + contains_null=False, + contains_nan=None, + lower_bound=STRING_MIN, + upper_bound=STRING_MIN, + ), + ) + + +def test_all_nulls(schema: Schema, manifest: ManifestFile) -> None: + assert not _ManifestEvalVisitor(schema, NotNull(Reference("all_nulls_missing_nan")), case_sensitive=True).eval( + manifest + ), "Should skip: all-nulls column with non-floating type contains only nulls" + + assert _ManifestEvalVisitor(schema, NotNull(Reference("all_nulls_missing_nan_float")), case_sensitive=True).eval( + manifest + ), "Should read: no NaN information may indicate presence of NaN values" + + assert _ManifestEvalVisitor(schema, NotNull(Reference("some_nulls")), case_sensitive=True).eval( + manifest + ), "Should read: column with some nulls contains a non-null value" + + assert _ManifestEvalVisitor(schema, NotNull(Reference("no_nulls")), case_sensitive=True).eval( + manifest + ), "Should read: non-null column contains a non-null value" + + +def test_no_nulls(schema: Schema, manifest: ManifestFile) -> None: + assert _ManifestEvalVisitor(schema, IsNull(Reference("all_nulls_missing_nan")), case_sensitive=True).eval( + manifest + ), "Should read: at least one null value in all null column" + + assert _ManifestEvalVisitor(schema, IsNull(Reference("some_nulls")), case_sensitive=True).eval( + manifest + ), "Should read: column with some nulls contains a null value" + + assert not _ManifestEvalVisitor(schema, IsNull(Reference("no_nulls")), case_sensitive=True).eval( + manifest + ), "Should skip: non-null column contains no null values" + + assert _ManifestEvalVisitor(schema, IsNull(Reference("both_nan_and_null")), case_sensitive=True).eval( + manifest + ), "Should read: both_nan_and_null column contains null values" + + +def test_is_nan(schema: Schema, manifest: ManifestFile) -> None: + assert _ManifestEvalVisitor(schema, IsNaN(Reference("float")), case_sensitive=True).eval( + manifest + ), "Should read: no information on whether there are NaN values in the float column" + + assert _ManifestEvalVisitor(schema, IsNaN(Reference("all_nulls_double")), case_sensitive=True).eval( + manifest + ), "Should read: no NaN information may indicate presence of
NaN value" + + assert _ManifestEvalVisitor(schema, IsNaN(Reference("all_nulls_missing_nan_float")), case_sensitive=True).eval( + manifest + ), "Should read: no NaN information may indicate presence of NaN value" + + assert not _ManifestEvalVisitor(schema, IsNaN(Reference("all_nulls_no_nans")), case_sensitive=True).eval( + manifest + ), "Should skip: no nan column doesn't contain nan value" + + assert _ManifestEvalVisitor(schema, IsNaN(Reference("all_nans")), case_sensitive=True).eval( + manifest + ), "Should read: all_nans column contains nan value" + + assert _ManifestEvalVisitor(schema, IsNaN(Reference("both_nan_and_null")), case_sensitive=True).eval( + manifest + ), "Should read: both_nan_and_null column contains nan value" + + assert not _ManifestEvalVisitor(schema, IsNaN(Reference("no_nan_or_null")), case_sensitive=True).eval( + manifest + ), "Should skip: no_nan_or_null column doesn't contain nan value" + + +def test_not_nan(schema: Schema, manifest: ManifestFile) -> None: + assert _ManifestEvalVisitor(schema, NotNaN(Reference("float")), case_sensitive=True).eval( + manifest + ), "Should read: no information on if there are nan value in float column" + + assert _ManifestEvalVisitor(schema, NotNaN(Reference("all_nulls_double")), case_sensitive=True).eval( + manifest + ), "Should read: all null column contains non nan value" + + assert _ManifestEvalVisitor(schema, NotNaN(Reference("all_nulls_no_nans")), case_sensitive=True).eval( + manifest + ), "Should read: no_nans column contains non nan value" + + assert not _ManifestEvalVisitor(schema, NotNaN(Reference("all_nans")), case_sensitive=True).eval( + manifest + ), "Should skip: all nans column doesn't contain non nan value" + + assert _ManifestEvalVisitor(schema, NotNaN(Reference("both_nan_and_null")), case_sensitive=True).eval( + manifest + ), "Should read: both_nan_and_null nans column contains non nan value" + + assert _ManifestEvalVisitor(schema, NotNaN(Reference("no_nan_or_null")), case_sensitive=True).eval( + manifest + ), "Should read: no_nan_or_null column contains non nan value" + + +def test_missing_stats(schema: Schema, manifest_no_stats: ManifestFile) -> None: + expressions: List[BooleanExpression] = [ + LessThan(Reference("id"), 5), + LessThanOrEqual(Reference("id"), 30), + EqualTo(Reference("id"), 70), + GreaterThan(Reference("id"), 78), + GreaterThanOrEqual(Reference("id"), 90), + NotEqualTo(Reference("id"), 101), + IsNull(Reference("id")), + NotNull(Reference("id")), + IsNaN(Reference("float")), + NotNaN(Reference("float")), + ] + + for expr in expressions: + assert _ManifestEvalVisitor(schema, expr, case_sensitive=True).eval( + manifest_no_stats + ), f"Should read when missing stats for expr: {expr}" + + +def test_not(schema: Schema, manifest: ManifestFile) -> None: + assert _ManifestEvalVisitor(schema, Not(LessThan(Reference("id"), INT_MIN_VALUE - 25)), case_sensitive=True).eval( + manifest + ), "Should read: not(false)" + + assert not _ManifestEvalVisitor(schema, Not(GreaterThan(Reference("id"), INT_MIN_VALUE - 25)), case_sensitive=True).eval( + manifest + ), "Should skip: not(true)" + + +def test_and(schema: Schema, manifest: ManifestFile) -> None: + assert not _ManifestEvalVisitor( + schema, + And( + LessThan(Reference("id"), INT_MIN_VALUE - 25), + GreaterThanOrEqual(Reference("id"), INT_MIN_VALUE - 30), + ), + case_sensitive=True, + ).eval(manifest), "Should skip: and(false, true)" + + assert not _ManifestEvalVisitor( + schema, + And( + LessThan(Reference("id"), INT_MIN_VALUE - 25), + 
GreaterThanOrEqual(Reference("id"), INT_MAX_VALUE + 1), + ), + case_sensitive=True, + ).eval(manifest), "Should skip: and(false, false)" + + assert _ManifestEvalVisitor( + schema, + And( + GreaterThan(Reference("id"), INT_MIN_VALUE - 25), + LessThanOrEqual(Reference("id"), INT_MIN_VALUE), + ), + case_sensitive=True, + ).eval(manifest), "Should read: and(true, true)" + + +def test_or(schema: Schema, manifest: ManifestFile) -> None: + assert not _ManifestEvalVisitor( + schema, + Or( + LessThan(Reference("id"), INT_MIN_VALUE - 25), + GreaterThanOrEqual(Reference("id"), INT_MAX_VALUE + 1), + ), + case_sensitive=True, + ).eval(manifest), "Should skip: or(false, false)" + + assert _ManifestEvalVisitor( + schema, + Or( + LessThan(Reference("id"), INT_MIN_VALUE - 25), + GreaterThanOrEqual(Reference("id"), INT_MAX_VALUE - 19), + ), + case_sensitive=True, + ).eval(manifest), "Should read: or(false, true)" + + +def test_integer_lt(schema: Schema, manifest: ManifestFile) -> None: + assert not _ManifestEvalVisitor(schema, LessThan(Reference("id"), INT_MIN_VALUE - 25), case_sensitive=True).eval( + manifest + ), "Should not read: id range below lower bound (5 < 30)" + + assert not _ManifestEvalVisitor(schema, LessThan(Reference("id"), INT_MIN_VALUE), case_sensitive=True).eval( + manifest + ), "Should not read: id range below lower bound (30 is not < 30)" + + assert _ManifestEvalVisitor(schema, LessThan(Reference("id"), INT_MIN_VALUE + 1), case_sensitive=True).eval( + manifest + ), "Should read: one possible id" + + assert _ManifestEvalVisitor(schema, LessThan(Reference("id"), INT_MAX_VALUE), case_sensitive=True).eval( + manifest + ), "Should read: may possible ids" + + +def test_integer_lt_eq(schema: Schema, manifest: ManifestFile) -> None: + assert not _ManifestEvalVisitor(schema, LessThanOrEqual(Reference("id"), INT_MIN_VALUE - 25), case_sensitive=True).eval( + manifest + ), "Should not read: id range below lower bound (5 < 30)" + + assert not _ManifestEvalVisitor(schema, LessThanOrEqual(Reference("id"), INT_MIN_VALUE - 1), case_sensitive=True).eval( + manifest + ), "Should not read: id range below lower bound (29 < 30)" + + assert _ManifestEvalVisitor(schema, LessThanOrEqual(Reference("id"), INT_MIN_VALUE), case_sensitive=True).eval( + manifest + ), "Should read: one possible id" + + assert _ManifestEvalVisitor(schema, LessThanOrEqual(Reference("id"), INT_MAX_VALUE), case_sensitive=True).eval( + manifest + ), "Should read: many possible ids" + + +def test_integer_gt(schema: Schema, manifest: ManifestFile) -> None: + assert not _ManifestEvalVisitor(schema, GreaterThan(Reference("id"), INT_MAX_VALUE + 6), case_sensitive=True).eval( + manifest + ), "Should not read: id range above upper bound (85 < 79)" + + assert not _ManifestEvalVisitor(schema, GreaterThan(Reference("id"), INT_MAX_VALUE), case_sensitive=True).eval( + manifest + ), "Should not read: id range above upper bound (79 is not > 79)" + + assert _ManifestEvalVisitor(schema, GreaterThan(Reference("id"), INT_MAX_VALUE - 1), case_sensitive=True).eval( + manifest + ), "Should read: one possible id" + + assert _ManifestEvalVisitor(schema, GreaterThan(Reference("id"), INT_MAX_VALUE - 4), case_sensitive=True).eval( + manifest + ), "Should read: may possible ids" + + +def test_integer_gt_eq(schema: Schema, manifest: ManifestFile) -> None: + assert not _ManifestEvalVisitor(schema, GreaterThanOrEqual(Reference("id"), INT_MAX_VALUE + 6), case_sensitive=True).eval( + manifest + ), "Should not read: id range above upper bound (85 < 79)" + + assert not 
_ManifestEvalVisitor(schema, GreaterThanOrEqual(Reference("id"), INT_MAX_VALUE + 1), case_sensitive=True).eval( + manifest + ), "Should not read: id range above upper bound (80 > 79)" + + assert _ManifestEvalVisitor(schema, GreaterThanOrEqual(Reference("id"), INT_MAX_VALUE), case_sensitive=True).eval( + manifest + ), "Should read: one possible id" + + assert _ManifestEvalVisitor(schema, GreaterThanOrEqual(Reference("id"), INT_MAX_VALUE - 4), case_sensitive=True).eval( + manifest + ), "Should read: many possible ids" + + +def test_integer_eq(schema: Schema, manifest: ManifestFile) -> None: + assert not _ManifestEvalVisitor(schema, EqualTo(Reference("id"), INT_MIN_VALUE - 25), case_sensitive=True).eval( + manifest + ), "Should not read: id below lower bound" + + assert not _ManifestEvalVisitor(schema, EqualTo(Reference("id"), INT_MIN_VALUE - 1), case_sensitive=True).eval( + manifest + ), "Should not read: id below lower bound" + + assert _ManifestEvalVisitor(schema, EqualTo(Reference("id"), INT_MIN_VALUE), case_sensitive=True).eval( + manifest + ), "Should read: id equal to lower bound" + + assert _ManifestEvalVisitor(schema, EqualTo(Reference("id"), INT_MAX_VALUE - 4), case_sensitive=True).eval( + manifest + ), "Should read: id between lower and upper bounds" + + assert _ManifestEvalVisitor(schema, EqualTo(Reference("id"), INT_MAX_VALUE), case_sensitive=True).eval( + manifest + ), "Should read: id equal to upper bound" + + assert not _ManifestEvalVisitor(schema, EqualTo(Reference("id"), INT_MAX_VALUE + 1), case_sensitive=True).eval( + manifest + ), "Should not read: id above upper bound" + + assert not _ManifestEvalVisitor(schema, EqualTo(Reference("id"), INT_MAX_VALUE + 6), case_sensitive=True).eval( + manifest + ), "Should not read: id above upper bound" + + +def test_integer_not_eq(schema: Schema, manifest: ManifestFile) -> None: + assert _ManifestEvalVisitor(schema, NotEqualTo(Reference("id"), INT_MIN_VALUE - 25), case_sensitive=True).eval( + manifest + ), "Should read: id below lower bound" + + assert _ManifestEvalVisitor(schema, NotEqualTo(Reference("id"), INT_MIN_VALUE - 1), case_sensitive=True).eval( + manifest + ), "Should read: id below lower bound" + + assert _ManifestEvalVisitor(schema, NotEqualTo(Reference("id"), INT_MIN_VALUE), case_sensitive=True).eval( + manifest + ), "Should read: id equal to lower bound" + + assert _ManifestEvalVisitor(schema, NotEqualTo(Reference("id"), INT_MAX_VALUE - 4), case_sensitive=True).eval( + manifest + ), "Should read: id between lower and upper bounds" + + assert _ManifestEvalVisitor(schema, NotEqualTo(Reference("id"), INT_MAX_VALUE), case_sensitive=True).eval( + manifest + ), "Should read: id equal to upper bound" + + assert _ManifestEvalVisitor(schema, NotEqualTo(Reference("id"), INT_MAX_VALUE + 1), case_sensitive=True).eval( + manifest + ), "Should read: id above upper bound" + + assert _ManifestEvalVisitor(schema, NotEqualTo(Reference("id"), INT_MAX_VALUE + 6), case_sensitive=True).eval( + manifest + ), "Should read: id above upper bound" + + +def test_integer_not_eq_rewritten(schema: Schema, manifest: ManifestFile) -> None: + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("id"), INT_MIN_VALUE - 25)), case_sensitive=True).eval( + manifest + ), "Should read: id below lower bound" + + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("id"), INT_MIN_VALUE - 1)), case_sensitive=True).eval( + manifest + ), "Should read: id below lower bound" + + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("id"), INT_MIN_VALUE)),
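+ # Note: the evaluator applies rewrite_not(...) before binding (see the rewrite_not tests
+ # further down), so each Not(EqualTo(...)) here should be evaluated as a NotEqualTo,
+ # mirroring test_integer_not_eq above.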
case_sensitive=True).eval( + manifest + ), "Should read: id equal to lower bound" + + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("id"), INT_MAX_VALUE - 4)), case_sensitive=True).eval( + manifest + ), "Should read: id between lower and upper bounds" + + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("id"), INT_MAX_VALUE)), case_sensitive=True).eval( + manifest + ), "Should read: id equal to upper bound" + + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("id"), INT_MAX_VALUE + 1)), case_sensitive=True).eval( + manifest + ), "Should read: id above upper bound" + + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("id"), INT_MAX_VALUE + 6)), case_sensitive=True).eval( + manifest + ), "Should read: id above upper bound" + + +def test_integer_not_eq_rewritten_case_insensitive(schema: Schema, manifest: ManifestFile) -> None: + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("ID"), INT_MIN_VALUE - 25)), case_sensitive=False).eval( + manifest + ), "Should read: id below lower bound" + + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("ID"), INT_MIN_VALUE - 1)), case_sensitive=False).eval( + manifest + ), "Should read: id below lower bound" + + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("ID"), INT_MIN_VALUE)), case_sensitive=False).eval( + manifest + ), "Should read: id equal to lower bound" + + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("ID"), INT_MAX_VALUE - 4)), case_sensitive=False).eval( + manifest + ), "Should read: id between lower and upper bounds" + + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("ID"), INT_MAX_VALUE)), case_sensitive=False).eval( + manifest + ), "Should read: id equal to upper bound" + + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("ID"), INT_MAX_VALUE + 1)), case_sensitive=False).eval( + manifest + ), "Should read: id above upper bound" + + assert _ManifestEvalVisitor(schema, Not(EqualTo(Reference("ID"), INT_MAX_VALUE + 6)), case_sensitive=False).eval( + manifest + ), "Should read: id above upper bound" + + +def test_integer_in(schema: Schema, manifest: ManifestFile) -> None: + assert not _ManifestEvalVisitor( + schema, In(Reference("id"), (INT_MIN_VALUE - 25, INT_MIN_VALUE - 24)), case_sensitive=True + ).eval(manifest), "Should not read: id below lower bound (5 < 30, 6 < 30)" + + assert not _ManifestEvalVisitor( + schema, In(Reference("id"), (INT_MIN_VALUE - 2, INT_MIN_VALUE - 1)), case_sensitive=True + ).eval(manifest), "Should not read: id below lower bound (28 < 30, 29 < 30)" + + assert _ManifestEvalVisitor(schema, In(Reference("id"), (INT_MIN_VALUE - 1, INT_MIN_VALUE)), case_sensitive=True).eval( + manifest + ), "Should read: id equal to lower bound (30 == 30)" + + assert _ManifestEvalVisitor(schema, In(Reference("id"), (INT_MAX_VALUE - 4, INT_MAX_VALUE - 3)), case_sensitive=True).eval( + manifest + ), "Should read: id between lower and upper bounds (30 < 75 < 79, 30 < 76 < 79)" + + assert _ManifestEvalVisitor(schema, In(Reference("id"), (INT_MAX_VALUE, INT_MAX_VALUE + 1)), case_sensitive=True).eval( + manifest + ), "Should read: id equal to upper bound (79 == 79)" + + assert not _ManifestEvalVisitor( + schema, In(Reference("id"), (INT_MAX_VALUE + 1, INT_MAX_VALUE + 2)), case_sensitive=True + ).eval(manifest), "Should not read: id above upper bound (80 > 79, 81 > 79)" + + assert not _ManifestEvalVisitor( + schema, In(Reference("id"), (INT_MAX_VALUE + 6, INT_MAX_VALUE + 7)), case_sensitive=True + ).eval(manifest), "Should not read: id above upper 
bound (85 > 79, 86 > 79)" + + assert not _ManifestEvalVisitor(schema, In(Reference("all_nulls_missing_nan"), ("abc", "def")), case_sensitive=True).eval( + manifest + ), "Should skip: in on all nulls column" + + assert _ManifestEvalVisitor(schema, In(Reference("some_nulls"), ("abc", "def")), case_sensitive=True).eval( + manifest + ), "Should read: in on some nulls column" + + assert _ManifestEvalVisitor(schema, In(Reference("no_nulls"), ("abc", "def")), case_sensitive=True).eval( + manifest + ), "Should read: in on no nulls column" + + +def test_integer_not_in(schema: Schema, manifest: ManifestFile) -> None: + assert _ManifestEvalVisitor( + schema, NotIn(Reference("id"), (INT_MIN_VALUE - 25, INT_MIN_VALUE - 24)), case_sensitive=True + ).eval(manifest), "Should read: id below lower bound (5 < 30, 6 < 30)" + + assert _ManifestEvalVisitor(schema, NotIn(Reference("id"), (INT_MIN_VALUE - 2, INT_MIN_VALUE - 1)), case_sensitive=True).eval( + manifest + ), "Should read: id below lower bound (28 < 30, 29 < 30)" + + assert _ManifestEvalVisitor(schema, NotIn(Reference("id"), (INT_MIN_VALUE - 1, INT_MIN_VALUE)), case_sensitive=True).eval( + manifest + ), "Should read: id equal to lower bound (30 == 30)" + + assert _ManifestEvalVisitor(schema, NotIn(Reference("id"), (INT_MAX_VALUE - 4, INT_MAX_VALUE - 3)), case_sensitive=True).eval( + manifest + ), "Should read: id between lower and upper bounds (30 < 75 < 79, 30 < 76 < 79)" + + assert _ManifestEvalVisitor(schema, NotIn(Reference("id"), (INT_MAX_VALUE, INT_MAX_VALUE + 1)), case_sensitive=True).eval( + manifest + ), "Should read: id equal to upper bound (79 == 79)" + + assert _ManifestEvalVisitor(schema, NotIn(Reference("id"), (INT_MAX_VALUE + 1, INT_MAX_VALUE + 2)), case_sensitive=True).eval( + manifest + ), "Should read: id above upper bound (80 > 79, 81 > 79)" + + assert _ManifestEvalVisitor(schema, NotIn(Reference("id"), (INT_MAX_VALUE + 6, INT_MAX_VALUE + 7)), case_sensitive=True).eval( + manifest + ), "Should read: id above upper bound (85 > 79, 86 > 79)" + + assert _ManifestEvalVisitor(schema, NotIn(Reference("all_nulls_missing_nan"), ("abc", "def")), case_sensitive=True).eval( + manifest + ), "Should read: notIn on no nulls column" + + assert _ManifestEvalVisitor(schema, NotIn(Reference("some_nulls"), ("abc", "def")), case_sensitive=True).eval( + manifest + ), "Should read: in on some nulls column" + + assert _ManifestEvalVisitor(schema, NotIn(Reference("no_nulls"), ("abc", "def")), case_sensitive=True).eval( + manifest + ), "Should read: in on no nulls column" + + +def test_string_starts_with(schema: Schema, manifest: ManifestFile) -> None: + assert _ManifestEvalVisitor(schema, StartsWith(Reference("some_nulls"), "a"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, StartsWith(Reference("some_nulls"), "aa"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, StartsWith(Reference("some_nulls"), "dddd"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, StartsWith(Reference("some_nulls"), "z"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, StartsWith(Reference("no_nulls"), "a"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert not _ManifestEvalVisitor(schema, StartsWith(Reference("some_nulls"), "zzzz"), case_sensitive=False).eval( + manifest + ), "Should skip: 
range doesn't match" + + assert not _ManifestEvalVisitor(schema, StartsWith(Reference("some_nulls"), "1"), case_sensitive=False).eval( + manifest + ), "Should skip: range doesn't match" + + +def test_string_not_starts_with(schema: Schema, manifest: ManifestFile) -> None: + assert _ManifestEvalVisitor(schema, NotStartsWith(Reference("some_nulls"), "a"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, NotStartsWith(Reference("some_nulls"), "aa"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, NotStartsWith(Reference("some_nulls"), "dddd"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, NotStartsWith(Reference("some_nulls"), "z"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, NotStartsWith(Reference("no_nulls"), "a"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, NotStartsWith(Reference("some_nulls"), "zzzz"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, NotStartsWith(Reference("some_nulls"), "1"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, NotStartsWith(Reference("all_same_value_or_null"), "a"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, NotStartsWith(Reference("all_same_value_or_null"), "aa"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert _ManifestEvalVisitor(schema, NotStartsWith(Reference("all_same_value_or_null"), "A"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + # Iceberg does not implement SQL's 3-way boolean logic, so the choice of an all null column + # matching is + # by definition in order to surface more values to the query engine to allow it to make its own + # decision. 
+ assert _ManifestEvalVisitor(schema, NotStartsWith(Reference("all_nulls_missing_nan"), "A"), case_sensitive=False).eval( + manifest + ), "Should read: range matches" + + assert not _ManifestEvalVisitor(schema, NotStartsWith(Reference("no_nulls_same_value_a"), "a"), case_sensitive=False).eval( + manifest + ), "Should not read: all values start with the prefix" + + +def test_rewrite_not_equal_to() -> None: + assert rewrite_not(Not(EqualTo(Reference("x"), 34.56))) == NotEqualTo(Reference("x"), 34.56) + + +def test_rewrite_not_not_equal_to() -> None: + assert rewrite_not(Not(NotEqualTo(Reference("x"), 34.56))) == EqualTo(Reference("x"), 34.56) + + +def test_rewrite_not_in() -> None: + assert rewrite_not(Not(In(Reference("x"), (34.56,)))) == NotIn(Reference("x"), (34.56,)) + + +def test_rewrite_and() -> None: + assert rewrite_not( + Not( + And( + EqualTo(Reference("x"), 34.56), + EqualTo(Reference("y"), 34.56), + ) + ) + ) == Or( + NotEqualTo(term=Reference(name="x"), literal=34.56), + NotEqualTo(term=Reference(name="y"), literal=34.56), + ) + + +def test_rewrite_or() -> None: + assert rewrite_not( + Not( + Or( + EqualTo(Reference("x"), 34.56), + EqualTo(Reference("y"), 34.56), + ) + ) + ) == And( + NotEqualTo(term=Reference(name="x"), literal=34.56), + NotEqualTo(term=Reference(name="y"), literal=34.56), + ) + + +def test_rewrite_always_false() -> None: + assert rewrite_not(Not(AlwaysFalse())) == AlwaysTrue() + + +def test_rewrite_always_true() -> None: + assert rewrite_not(Not(AlwaysTrue())) == AlwaysFalse() + + +def test_rewrite_bound() -> None: + schema = Schema(NestedField(2, "a", IntegerType(), required=False), schema_id=1) + assert rewrite_not(IsNull(Reference("a")).bind(schema)) == BoundIsNull( + term=BoundReference( + field=NestedField(field_id=2, name="a", field_type=IntegerType(), required=False), + accessor=Accessor(position=0, inner=None), + ) + ) + + +def test_to_dnf() -> None: + expr = Or(Not(EqualTo("P", "a")), And(EqualTo("Q", "b"), Not(Or(Not(EqualTo("R", "c")), EqualTo("S", "d"))))) + assert rewrite_to_dnf(expr) == (NotEqualTo("P", "a"), And(EqualTo("Q", "b"), And(EqualTo("R", "c"), NotEqualTo("S", "d")))) + + +def test_to_dnf_nested_or() -> None: + expr = Or(EqualTo("P", "a"), And(EqualTo("Q", "b"), Or(EqualTo("R", "c"), EqualTo("S", "d")))) + assert rewrite_to_dnf(expr) == ( + EqualTo("P", "a"), + And(EqualTo("Q", "b"), EqualTo("R", "c")), + And(EqualTo("Q", "b"), EqualTo("S", "d")), + ) + + +def test_to_dnf_double_distribution() -> None: + expr = And(Or(EqualTo("P", "a"), EqualTo("Q", "b")), Or(EqualTo("R", "c"), EqualTo("S", "d"))) + assert rewrite_to_dnf(expr) == ( + And( + left=EqualTo(term=Reference(name="P"), literal=literal("a")), + right=EqualTo(term=Reference(name="R"), literal=literal("c")), + ), + And( + left=EqualTo(term=Reference(name="P"), literal=literal("a")), + right=EqualTo(term=Reference(name="S"), literal=literal("d")), + ), + And( + left=EqualTo(term=Reference(name="Q"), literal=literal("b")), + right=EqualTo(term=Reference(name="R"), literal=literal("c")), + ), + And( + left=EqualTo(term=Reference(name="Q"), literal=literal("b")), + right=EqualTo(term=Reference(name="S"), literal=literal("d")), + ), + ) + + +def test_to_dnf_double_negation() -> None: + expr = rewrite_to_dnf(Not(Not(Not(Not(Not(Not(EqualTo("P", "a")))))))) + assert expr == (EqualTo("P", "a"),) + + +def test_to_dnf_and() -> None: + expr = And(Not(EqualTo("Q", "b")), EqualTo("R", "c")) + assert rewrite_to_dnf(expr) == (And(NotEqualTo("Q", "b"), EqualTo("R", "c")),) + + +def 
test_to_dnf_not_and() -> None: + expr = Not(And(Not(EqualTo("Q", "b")), EqualTo("R", "c"))) + assert rewrite_to_dnf(expr) == (EqualTo("Q", "b"), NotEqualTo("R", "c")) + + +def test_dnf_to_dask(table_schema_simple: Schema) -> None: + expr = ( + BoundGreaterThan[str]( + term=BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)), + literal=literal("hello"), + ), + And( + BoundIn[int]( + term=BoundReference(table_schema_simple.find_field(2), table_schema_simple.accessor_for_field(2)), + literals={literal(1), literal(2), literal(3)}, + ), + BoundEqualTo[bool]( + term=BoundReference(table_schema_simple.find_field(3), table_schema_simple.accessor_for_field(3)), + literal=literal(True), + ), + ), + ) + assert expression_to_plain_format(expr) == [[("foo", ">", "hello")], [("bar", "in", {1, 2, 3}), ("baz", "==", True)]] + + +def test_expression_evaluator_null() -> None: + struct = Record(a=None) + schema = Schema(NestedField(1, "a", IntegerType(), required=False), schema_id=1) + assert expression_evaluator(schema, In("a", {1, 2, 3}), case_sensitive=True)(struct) is False + assert expression_evaluator(schema, NotIn("a", {1, 2, 3}), case_sensitive=True)(struct) is True + assert expression_evaluator(schema, IsNaN("a"), case_sensitive=True)(struct) is False + assert expression_evaluator(schema, NotNaN("a"), case_sensitive=True)(struct) is True + assert expression_evaluator(schema, IsNull("a"), case_sensitive=True)(struct) is True + assert expression_evaluator(schema, NotNull("a"), case_sensitive=True)(struct) is False + assert expression_evaluator(schema, EqualTo("a", 1), case_sensitive=True)(struct) is False + assert expression_evaluator(schema, NotEqualTo("a", 1), case_sensitive=True)(struct) is True + assert expression_evaluator(schema, GreaterThanOrEqual("a", 1), case_sensitive=True)(struct) is False + assert expression_evaluator(schema, GreaterThan("a", 1), case_sensitive=True)(struct) is False + assert expression_evaluator(schema, LessThanOrEqual("a", 1), case_sensitive=True)(struct) is False + assert expression_evaluator(schema, LessThan("a", 1), case_sensitive=True)(struct) is False + assert expression_evaluator(schema, StartsWith("a", 1), case_sensitive=True)(struct) is False + assert expression_evaluator(schema, NotStartsWith("a", 1), case_sensitive=True)(struct) is True diff --git a/tests/io/test_fsspec.py b/tests/io/test_fsspec.py new file mode 100644 index 0000000000..f83268b56f --- /dev/null +++ b/tests/io/test_fsspec.py @@ -0,0 +1,655 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
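+# These tests exercise FsspecFileIO against s3://, abfss://, and gs:// locations; each +# backend-specific group is gated behind its pytest marker (s3, adlfs, gcs).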
+ +import uuid + +import pytest +from botocore.awsrequest import AWSRequest +from requests_mock import Mocker + +from pyiceberg.exceptions import SignError +from pyiceberg.io import fsspec +from pyiceberg.io.fsspec import FsspecFileIO, s3v4_rest_signer +from pyiceberg.io.pyarrow import PyArrowFileIO + + +@pytest.mark.s3 +def test_fsspec_new_input_file(fsspec_fileio: FsspecFileIO) -> None: + """Test creating a new input file from a fsspec file-io""" + filename = str(uuid.uuid4()) + + input_file = fsspec_fileio.new_input(f"s3://warehouse/{filename}") + + assert isinstance(input_file, fsspec.FsspecInputFile) + assert input_file.location == f"s3://warehouse/{filename}" + + +@pytest.mark.s3 +def test_fsspec_new_s3_output_file(fsspec_fileio: FsspecFileIO) -> None: + """Test creating a new output file from an fsspec file-io""" + filename = str(uuid.uuid4()) + + output_file = fsspec_fileio.new_output(f"s3://warehouse/{filename}") + + assert isinstance(output_file, fsspec.FsspecOutputFile) + assert output_file.location == f"s3://warehouse/{filename}" + + +@pytest.mark.s3 +def test_fsspec_write_and_read_file(fsspec_fileio: FsspecFileIO) -> None: + """Test writing and reading a file using FsspecInputFile and FsspecOutputFile""" + filename = str(uuid.uuid4()) + output_file = fsspec_fileio.new_output(location=f"s3://warehouse/{filename}") + with output_file.create() as f: + f.write(b"foo") + + input_file = fsspec_fileio.new_input(f"s3://warehouse/{filename}") + assert input_file.open().read() == b"foo" + + fsspec_fileio.delete(input_file) + + +@pytest.mark.s3 +def test_fsspec_getting_length_of_file(fsspec_fileio: FsspecFileIO) -> None: + """Test getting the length of an FsspecInputFile and FsspecOutputFile""" + filename = str(uuid.uuid4()) + + output_file = fsspec_fileio.new_output(location=f"s3://warehouse/{filename}") + with output_file.create() as f: + f.write(b"foobar") + + assert len(output_file) == 6 + + input_file = fsspec_fileio.new_input(location=f"s3://warehouse/{filename}") + assert len(input_file) == 6 + + fsspec_fileio.delete(output_file) + + +@pytest.mark.s3 +def test_fsspec_file_tell(fsspec_fileio: FsspecFileIO) -> None: + """Test finding cursor position for an fsspec file-io file""" + + filename = str(uuid.uuid4()) + + output_file = fsspec_fileio.new_output(location=f"s3://warehouse/{filename}") + with output_file.create() as write_file: + write_file.write(b"foobar") + + input_file = fsspec_fileio.new_input(location=f"s3://warehouse/{filename}") + f = input_file.open() + + f.seek(0) + assert f.tell() == 0 + f.seek(1) + assert f.tell() == 1 + f.seek(3) + assert f.tell() == 3 + f.seek(0) + assert f.tell() == 0 + + +@pytest.mark.s3 +def test_fsspec_read_specified_bytes_for_file(fsspec_fileio: FsspecFileIO) -> None: + """Test reading a specified number of bytes from an fsspec file-io file""" + + filename = str(uuid.uuid4()) + output_file = fsspec_fileio.new_output(location=f"s3://warehouse/{filename}") + with output_file.create() as write_file: + write_file.write(b"foo") + + input_file = fsspec_fileio.new_input(location=f"s3://warehouse/{filename}") + f = input_file.open() + + f.seek(0) + assert b"f" == f.read(1) + f.seek(0) + assert b"fo" == f.read(2) + f.seek(1) + assert b"o" == f.read(1) + f.seek(1) + assert b"oo" == f.read(2) + f.seek(0) + assert b"foo" == f.read(999) # test reading amount larger than entire content length + + fsspec_fileio.delete(input_file) + + +@pytest.mark.s3 +def test_fsspec_raise_on_opening_file_not_found(fsspec_fileio: FsspecFileIO) -> None: + """Test that an 
fsspec input file raises appropriately when the s3 file is not found""" + + filename = str(uuid.uuid4()) + input_file = fsspec_fileio.new_input(location=f"s3://warehouse/{filename}") + with pytest.raises(FileNotFoundError) as exc_info: + input_file.open().read() + + assert filename in str(exc_info.value) + + +@pytest.mark.s3 +def test_checking_if_a_file_exists(fsspec_fileio: FsspecFileIO) -> None: + """Test checking if a file exists""" + + non_existent_file = fsspec_fileio.new_input(location="s3://warehouse/does-not-exist.txt") + assert not non_existent_file.exists() + + filename = str(uuid.uuid4()) + output_file = fsspec_fileio.new_output(location=f"s3://warehouse/{filename}") + assert not output_file.exists() + with output_file.create() as f: + f.write(b"foo") + + existing_input_file = fsspec_fileio.new_input(location=f"s3://warehouse/{filename}") + assert existing_input_file.exists() + + existing_output_file = fsspec_fileio.new_output(location=f"s3://warehouse/{filename}") + assert existing_output_file.exists() + + fsspec_fileio.delete(existing_output_file) + + +@pytest.mark.s3 +def test_closing_a_file(fsspec_fileio: FsspecFileIO) -> None: + """Test closing an output file and input file""" + filename = str(uuid.uuid4()) + output_file = fsspec_fileio.new_output(location=f"s3://warehouse/{filename}") + with output_file.create() as write_file: + write_file.write(b"foo") + assert not write_file.closed # type: ignore + assert write_file.closed # type: ignore + + input_file = fsspec_fileio.new_input(location=f"s3://warehouse/{filename}") + f = input_file.open() + assert not f.closed # type: ignore + f.close() + assert f.closed # type: ignore + + fsspec_fileio.delete(f"s3://warehouse/{filename}") + + +@pytest.mark.s3 +def test_fsspec_converting_an_outputfile_to_an_inputfile(fsspec_fileio: FsspecFileIO) -> None: + """Test converting an output file to an input file""" + filename = str(uuid.uuid4()) + output_file = fsspec_fileio.new_output(location=f"s3://warehouse/{filename}") + input_file = output_file.to_input_file() + assert input_file.location == output_file.location + + +@pytest.mark.s3 +def test_writing_avro_file(generated_manifest_entry_file: str, fsspec_fileio: FsspecFileIO) -> None: + """Test that bytes match when reading a local avro file, writing it using fsspec file-io, and then reading it again""" + filename = str(uuid.uuid4()) + with PyArrowFileIO().new_input(location=generated_manifest_entry_file).open() as f: + b1 = f.read() + with fsspec_fileio.new_output(location=f"s3://warehouse/{filename}").create() as out_f: + out_f.write(b1) + with fsspec_fileio.new_input(location=f"s3://warehouse/{filename}").open() as in_f: + b2 = in_f.read() + assert b1 == b2 # Check that the bytes read from the local avro file match the bytes written to s3 + + fsspec_fileio.delete(f"s3://warehouse/{filename}") + + +@pytest.mark.adlfs +def test_fsspec_new_input_file_adlfs(adlfs_fsspec_fileio: FsspecFileIO) -> None: + """Test creating a new input file from an fsspec file-io""" + filename = str(uuid.uuid4()) + + input_file = adlfs_fsspec_fileio.new_input(f"abfss://tests/{filename}") + + assert isinstance(input_file, fsspec.FsspecInputFile) + assert input_file.location == f"abfss://tests/{filename}" + + +@pytest.mark.adlfs +def test_fsspec_new_abfss_output_file_adlfs(adlfs_fsspec_fileio: FsspecFileIO) -> None: + """Test creating a new output file from an fsspec file-io""" + filename = str(uuid.uuid4()) + + output_file = adlfs_fsspec_fileio.new_output(f"abfss://tests/{filename}") + + assert isinstance(output_file,
fsspec.FsspecOutputFile) + assert output_file.location == f"abfss://tests/{filename}" + + +@pytest.mark.adlfs +def test_fsspec_write_and_read_file_adlfs(adlfs_fsspec_fileio: FsspecFileIO) -> None: + """Test writing and reading a file using FsspecInputFile and FsspecOutputFile""" + filename = str(uuid.uuid4()) + output_file = adlfs_fsspec_fileio.new_output(location=f"abfss://tests/{filename}") + with output_file.create() as f: + f.write(b"foo") + + input_file = adlfs_fsspec_fileio.new_input(f"abfss://tests/{filename}") + assert input_file.open().read() == b"foo" + + adlfs_fsspec_fileio.delete(input_file) + + +@pytest.mark.adlfs +def test_fsspec_getting_length_of_file_adlfs(adlfs_fsspec_fileio: FsspecFileIO) -> None: + """Test getting the length of an FsspecInputFile and FsspecOutputFile""" + filename = str(uuid.uuid4()) + + output_file = adlfs_fsspec_fileio.new_output(location=f"abfss://tests/{filename}") + with output_file.create() as f: + f.write(b"foobar") + + assert len(output_file) == 6 + + input_file = adlfs_fsspec_fileio.new_input(location=f"abfss://tests/{filename}") + assert len(input_file) == 6 + + adlfs_fsspec_fileio.delete(output_file) + + +@pytest.mark.adlfs +def test_fsspec_file_tell_adlfs(adlfs_fsspec_fileio: FsspecFileIO) -> None: + """Test finding cursor position for an fsspec file-io file""" + + filename = str(uuid.uuid4()) + + output_file = adlfs_fsspec_fileio.new_output(location=f"abfss://tests/{filename}") + with output_file.create() as write_file: + write_file.write(b"foobar") + + input_file = adlfs_fsspec_fileio.new_input(location=f"abfss://tests/{filename}") + f = input_file.open() + + f.seek(0) + assert f.tell() == 0 + f.seek(1) + assert f.tell() == 1 + f.seek(3) + assert f.tell() == 3 + f.seek(0) + assert f.tell() == 0 + + adlfs_fsspec_fileio.delete(f"abfss://tests/{filename}") + + +@pytest.mark.adlfs +def test_fsspec_read_specified_bytes_for_file_adlfs(adlfs_fsspec_fileio: FsspecFileIO) -> None: + """Test reading a specified number of bytes from an fsspec file-io file""" + + filename = str(uuid.uuid4()) + output_file = adlfs_fsspec_fileio.new_output(location=f"abfss://tests/{filename}") + with output_file.create() as write_file: + write_file.write(b"foo") + + input_file = adlfs_fsspec_fileio.new_input(location=f"abfss://tests/{filename}") + f = input_file.open() + + f.seek(0) + assert b"f" == f.read(1) + f.seek(0) + assert b"fo" == f.read(2) + f.seek(1) + assert b"o" == f.read(1) + f.seek(1) + assert b"oo" == f.read(2) + f.seek(0) + assert b"foo" == f.read(999) # test reading amount larger than entire content length + + adlfs_fsspec_fileio.delete(input_file) + + +@pytest.mark.adlfs +def test_fsspec_raise_on_opening_file_not_found_adlfs(adlfs_fsspec_fileio: FsspecFileIO) -> None: + """Test that an fsspec input file raises appropriately when the adlfs file is not found""" + + filename = str(uuid.uuid4()) + input_file = adlfs_fsspec_fileio.new_input(location=f"abfss://tests/{filename}") + with pytest.raises(FileNotFoundError) as exc_info: + input_file.open().read() + + assert filename in str(exc_info.value) + + +@pytest.mark.adlfs +def test_checking_if_a_file_exists_adlfs(adlfs_fsspec_fileio: FsspecFileIO) -> None: + """Test checking if a file exists""" + + non_existent_file = adlfs_fsspec_fileio.new_input(location="abfss://tests/does-not-exist.txt") + assert not non_existent_file.exists() + + filename = str(uuid.uuid4()) + output_file = adlfs_fsspec_fileio.new_output(location=f"abfss://tests/{filename}") + assert not output_file.exists() + with output_file.create() 
as f: + f.write(b"foo") + + existing_input_file = adlfs_fsspec_fileio.new_input(location=f"abfss://tests/{filename}") + assert existing_input_file.exists() + + existing_output_file = adlfs_fsspec_fileio.new_output(location=f"abfss://tests/{filename}") + assert existing_output_file.exists() + + adlfs_fsspec_fileio.delete(existing_output_file) + + +@pytest.mark.adlfs +def test_closing_a_file_adlfs(adlfs_fsspec_fileio: FsspecFileIO) -> None: + """Test closing an output file and input file""" + filename = str(uuid.uuid4()) + output_file = adlfs_fsspec_fileio.new_output(location=f"abfss://tests/{filename}") + with output_file.create() as write_file: + write_file.write(b"foo") + assert not write_file.closed # type: ignore + assert write_file.closed # type: ignore + + input_file = adlfs_fsspec_fileio.new_input(location=f"abfss://tests/{filename}") + f = input_file.open() + assert not f.closed # type: ignore + f.close() + assert f.closed # type: ignore + + adlfs_fsspec_fileio.delete(f"abfss://tests/{filename}") + + +@pytest.mark.adlfs +def test_fsspec_converting_an_outputfile_to_an_inputfile_adlfs(adlfs_fsspec_fileio: FsspecFileIO) -> None: + """Test converting an output file to an input file""" + filename = str(uuid.uuid4()) + output_file = adlfs_fsspec_fileio.new_output(location=f"abfss://tests/{filename}") + input_file = output_file.to_input_file() + assert input_file.location == output_file.location + + +@pytest.mark.adlfs +def test_writing_avro_file_adlfs(generated_manifest_entry_file: str, adlfs_fsspec_fileio: FsspecFileIO) -> None: + """Test that bytes match when reading a local avro file, writing it using fsspec file-io, and then reading it again""" + filename = str(uuid.uuid4()) + with PyArrowFileIO().new_input(location=generated_manifest_entry_file).open() as f: + b1 = f.read() + with adlfs_fsspec_fileio.new_output(location=f"abfss://tests/{filename}").create() as out_f: + out_f.write(b1) + with adlfs_fsspec_fileio.new_input(location=f"abfss://tests/{filename}").open() as in_f: + b2 = in_f.read() + assert b1 == b2 # Check that the bytes read from the local avro file match the bytes written to adlfs + + adlfs_fsspec_fileio.delete(f"abfss://tests/{filename}") + + +@pytest.mark.gcs +def test_fsspec_new_input_file_gcs(fsspec_fileio_gcs: FsspecFileIO) -> None: + """Test creating a new input file from an fsspec file-io""" + location = f"gs://warehouse/{uuid.uuid4()}.txt" + + input_file = fsspec_fileio_gcs.new_input(location=location) + + assert isinstance(input_file, fsspec.FsspecInputFile) + assert input_file.location == location + + +@pytest.mark.gcs +def test_fsspec_new_output_file_gcs(fsspec_fileio_gcs: FsspecFileIO) -> None: + """Test creating a new output file from an fsspec file-io""" + location = f"gs://warehouse/{uuid.uuid4()}.txt" + + output_file = fsspec_fileio_gcs.new_output(location=location) + + assert isinstance(output_file, fsspec.FsspecOutputFile) + assert output_file.location == location + + +@pytest.mark.gcs +def test_fsspec_write_and_read_file_gcs(fsspec_fileio_gcs: FsspecFileIO) -> None: + """Test writing and reading a file using FsspecInputFile and FsspecOutputFile""" + location = f"gs://warehouse/{uuid.uuid4()}.txt" + output_file = fsspec_fileio_gcs.new_output(location=location) + with output_file.create() as f: + f.write(b"foo") + + input_file = fsspec_fileio_gcs.new_input(location) + with input_file.open() as f: + assert f.read() == b"foo" + + fsspec_fileio_gcs.delete(input_file) + + +@pytest.mark.gcs +def test_fsspec_getting_length_of_file_gcs(fsspec_fileio_gcs: FsspecFileIO)
-> None: + """Test getting the length of an FsspecInputFile and FsspecOutputFile""" + location = f"gs://warehouse/{uuid.uuid4()}.txt" + + output_file = fsspec_fileio_gcs.new_output(location=location) + with output_file.create() as f: + f.write(b"foobar") + + assert len(output_file) == 6 + + input_file = fsspec_fileio_gcs.new_input(location=location) + assert len(input_file) == 6 + + fsspec_fileio_gcs.delete(output_file) + + +@pytest.mark.gcs +def test_fsspec_file_tell_gcs(fsspec_fileio_gcs: FsspecFileIO) -> None: + """Test finding cursor position for an fsspec file-io file""" + location = f"gs://warehouse/{uuid.uuid4()}.txt" + + output_file = fsspec_fileio_gcs.new_output(location=location) + with output_file.create() as write_file: + write_file.write(b"foobar") + + input_file = fsspec_fileio_gcs.new_input(location=location) + with input_file.open() as f: + f.seek(0) + assert f.tell() == 0 + f.seek(1) + assert f.tell() == 1 + f.seek(3) + assert f.tell() == 3 + f.seek(0) + assert f.tell() == 0 + + +@pytest.mark.gcs +def test_fsspec_read_specified_bytes_for_file_gcs(fsspec_fileio_gcs: FsspecFileIO) -> None: + """Test reading a specified number of bytes from a fsspec file-io file""" + location = f"gs://warehouse/{uuid.uuid4()}.txt" + + output_file = fsspec_fileio_gcs.new_output(location=location) + with output_file.create() as write_file: + write_file.write(b"foo") + + input_file = fsspec_fileio_gcs.new_input(location=location) + with input_file.open() as f: + f.seek(0) + assert b"f" == f.read(1) + f.seek(0) + assert b"fo" == f.read(2) + f.seek(1) + assert b"o" == f.read(1) + f.seek(1) + assert b"oo" == f.read(2) + f.seek(0) + assert b"foo" == f.read(999) # test reading amount larger than entire content length + + fsspec_fileio_gcs.delete(input_file) + + +@pytest.mark.gcs +def test_fsspec_raise_on_opening_file_not_found_gcs(fsspec_fileio_gcs: FsspecFileIO) -> None: + """Test that a fsspec input file raises appropriately when the gcs file is not found""" + location = f"gs://warehouse/{uuid.uuid4()}.txt" + input_file = fsspec_fileio_gcs.new_input(location=location) + with pytest.raises(FileNotFoundError) as exc_info: + input_file.open().read() + + assert location in str(exc_info.value) + + +@pytest.mark.gcs +def test_checking_if_a_file_exists_gcs(fsspec_fileio_gcs: FsspecFileIO) -> None: + """Test checking if a file exists""" + + non_existent_file = fsspec_fileio_gcs.new_input(location="gs://warehouse/does-not-exist.txt") + assert not non_existent_file.exists() + + location = f"gs://warehouse/{uuid.uuid4()}.txt" + output_file = fsspec_fileio_gcs.new_output(location=location) + assert not output_file.exists() + with output_file.create() as f: + f.write(b"foo") + + existing_input_file = fsspec_fileio_gcs.new_input(location=location) + assert existing_input_file.exists() + + existing_output_file = fsspec_fileio_gcs.new_output(location=location) + assert existing_output_file.exists() + + fsspec_fileio_gcs.delete(existing_output_file) + + +@pytest.mark.gcs +def test_closing_a_file_gcs(fsspec_fileio_gcs: FsspecFileIO) -> None: + """Test closing an output file and input file""" + location = f"gs://warehouse/{uuid.uuid4()}.txt" + output_file = fsspec_fileio_gcs.new_output(location=location) + with output_file.create() as write_file: + write_file.write(b"foo") + assert not write_file.closed # type: ignore + assert write_file.closed # type: ignore + + input_file = fsspec_fileio_gcs.new_input(location=location) + f = input_file.open() + assert not f.closed # type: ignore + f.close() + assert f.closed # 
type: ignore + + fsspec_fileio_gcs.delete(location=location) + + +@pytest.mark.gcs +def test_fsspec_converting_an_outputfile_to_an_inputfile_gcs(fsspec_fileio_gcs: FsspecFileIO) -> None: + """Test converting an output file to an input file""" + filename = str(uuid.uuid4()) + output_file = fsspec_fileio_gcs.new_output(location=f"gs://warehouse/{filename}") + input_file = output_file.to_input_file() + assert input_file.location == output_file.location + + +@pytest.mark.gcs +def test_writing_avro_file_gcs(generated_manifest_entry_file: str, fsspec_fileio_gcs: FsspecFileIO) -> None: + """Test that bytes match when reading a local avro file, writing it using fsspec file-io, and then reading it again""" + filename = str(uuid.uuid4()) + with PyArrowFileIO().new_input(location=generated_manifest_entry_file).open() as f: + b1 = f.read() + with fsspec_fileio_gcs.new_output(location=f"gs://warehouse/{filename}").create() as out_f: + out_f.write(b1) + with fsspec_fileio_gcs.new_input(location=f"gs://warehouse/{filename}").open() as in_f: + b2 = in_f.read() + assert b1 == b2 # Check that the bytes read from the local avro file match the bytes written to gcs + + fsspec_fileio_gcs.delete(f"gs://warehouse/{filename}") + + +TEST_URI = "https://iceberg-test-signer" + + +def test_s3v4_rest_signer(requests_mock: Mocker) -> None: + new_uri = "https://other-bucket/metadata/snap-8048355899640248710-1-a5c8ea2d-aa1f-48e8-89f4-1fa69db8c742.avro" + requests_mock.post( + f"{TEST_URI}/v1/aws/s3/sign", + json={ + "uri": new_uri, + "headers": { + "Authorization": [ + "AWS4-HMAC-SHA256 Credential=ASIAQPRZZYGHUT57DL3I/20221017/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-security-token, Signature=430582a17d61ab02c272896fa59195f277af4bdf2121c441685e589f044bbe02" + ], + "Host": ["bucket.s3.us-west-2.amazonaws.com"], + "User-Agent": ["Botocore/1.27.59 Python/3.10.7 Darwin/21.5.0"], + "x-amz-content-sha256": ["UNSIGNED-PAYLOAD"], + "X-Amz-Date": ["20221017T102940Z"], + "X-Amz-Security-Token": [ + "YQoJb3JpZ2luX2VjEDoaCXVzLXdlc3QtMiJGMEQCID/fFxZP5oaEgQmcwP6XhZa0xSq9lmLSx8ffaWbySfUPAiAesa7sjd/WV4uwRTO0S03y/MWVtgpH+/NyZQ4bZgLVriqrAggTEAEaDDAzMzQwNzIyMjE1OSIMOeFOWhZIurMmAqjsKogCxMCqxX8ZjK0gacAkcDqBCyA7qTSLhdfKQIH/w7WpLBU1km+cRUWWCudan6gZsAq867DBaKEP7qI05DAWr9MChAkgUgyI8/G3Z23ET0gAedf3GsJbakB0F1kklx8jPmj4BPCht9RcTiXiJ5DxTS/cRCcalIQXmPFbaJSqpBusVG2EkWnm1v7VQrNPE2Os2b2P293vpbhwkyCEQiGRVva4Sw9D1sKvqSsK10QCRG+os6dFEOu1kARaXi6pStvR4OVmj7OYeAYjzaFchn7nz2CSae0M4IluiYQ01eQAywbfRo9DpKSmDM/DnPZWJnD/woLhaaaCrCxSSEaFsvGOHFhLd3Rknw1v0jADMILUtJoGOp4BpqKqyMz0CY3kpKL0jfR3ykTf/ge9wWVE0Alr7wRIkGCIURkhslGHqSyFRGoTqIXaxU+oPbwlw/0w/nYO7qQ6bTANOWye/wgw4h/NmJ6vU7wnZTXwREf1r6MF72++bE/fMk19LfVb8jN/qrUqAUXTc8gBAUxL5pgy8+oT/JnI2BkVrrLS4ilxEXP9Ahm+6GDUYXV4fBpqpZwdkzQ/5Gw=" + ], + }, + "extensions": {}, + }, + status_code=200, + ) + + request = AWSRequest( + method="HEAD", + url="https://bucket/metadata/snap-8048355899640248710-1-a5c8ea2d-aa1f-48e8-89f4-1fa69db8c742.avro", + headers={"User-Agent": "Botocore/1.27.59 Python/3.10.7 Darwin/21.5.0"}, + data=b"", + params={}, + auth_path="/metadata/snap-8048355899640248710-1-a5c8ea2d-aa1f-48e8-89f4-1fa69db8c742.avro", + ) + request.context = { + "client_region": "us-west-2", + "has_streaming_input": False, + "auth_type": None, + "signing": {"bucket": "bucket"}, + "retries": {"attempt": 1, "invocation-id": "75d143fb-0219-439b-872c-18213d1c8d54"}, + } + + signed_request = s3v4_rest_signer({"token": "abc", "uri": TEST_URI}, request) + + assert signed_request.url == new_uri + assert
dict(signed_request.headers) == { + "Authorization": "AWS4-HMAC-SHA256 Credential=ASIAQPRZZYGHUT57DL3I/20221017/us-west-2/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-security-token, Signature=430582a17d61ab02c272896fa59195f277af4bdf2121c441685e589f044bbe02", + "Host": "bucket.s3.us-west-2.amazonaws.com", + "User-Agent": "Botocore/1.27.59 Python/3.10.7 Darwin/21.5.0", + "X-Amz-Date": "20221017T102940Z", + "X-Amz-Security-Token": "YQoJb3JpZ2luX2VjEDoaCXVzLXdlc3QtMiJGMEQCID/fFxZP5oaEgQmcwP6XhZa0xSq9lmLSx8ffaWbySfUPAiAesa7sjd/WV4uwRTO0S03y/MWVtgpH+/NyZQ4bZgLVriqrAggTEAEaDDAzMzQwNzIyMjE1OSIMOeFOWhZIurMmAqjsKogCxMCqxX8ZjK0gacAkcDqBCyA7qTSLhdfKQIH/w7WpLBU1km+cRUWWCudan6gZsAq867DBaKEP7qI05DAWr9MChAkgUgyI8/G3Z23ET0gAedf3GsJbakB0F1kklx8jPmj4BPCht9RcTiXiJ5DxTS/cRCcalIQXmPFbaJSqpBusVG2EkWnm1v7VQrNPE2Os2b2P293vpbhwkyCEQiGRVva4Sw9D1sKvqSsK10QCRG+os6dFEOu1kARaXi6pStvR4OVmj7OYeAYjzaFchn7nz2CSae0M4IluiYQ01eQAywbfRo9DpKSmDM/DnPZWJnD/woLhaaaCrCxSSEaFsvGOHFhLd3Rknw1v0jADMILUtJoGOp4BpqKqyMz0CY3kpKL0jfR3ykTf/ge9wWVE0Alr7wRIkGCIURkhslGHqSyFRGoTqIXaxU+oPbwlw/0w/nYO7qQ6bTANOWye/wgw4h/NmJ6vU7wnZTXwREf1r6MF72++bE/fMk19LfVb8jN/qrUqAUXTc8gBAUxL5pgy8+oT/JnI2BkVrrLS4ilxEXP9Ahm+6GDUYXV4fBpqpZwdkzQ/5Gw=", + "x-amz-content-sha256": "UNSIGNED-PAYLOAD", + } + + +def test_s3v4_rest_signer_forbidden(requests_mock: Mocker) -> None: + requests_mock.post( + f"{TEST_URI}/v1/aws/s3/sign", + json={ + "method": "HEAD", + "region": "us-west-2", + "uri": "https://bucket/metadata/snap-8048355899640248710-1-a5c8ea2d-aa1f-48e8-89f4-1fa69db8c742.avro", + "headers": {"User-Agent": ["Botocore/1.27.59 Python/3.10.7 Darwin/21.5.0"]}, + }, + status_code=401, + ) + + request = AWSRequest( + method="HEAD", + url="https://bucket/metadata/snap-8048355899640248710-1-a5c8ea2d-aa1f-48e8-89f4-1fa69db8c742.avro", + headers={"User-Agent": "Botocore/1.27.59 Python/3.10.7 Darwin/21.5.0"}, + data=b"", + params={}, + auth_path="/metadata/snap-8048355899640248710-1-a5c8ea2d-aa1f-48e8-89f4-1fa69db8c742.avro", + ) + request.context = { + "client_region": "us-west-2", + "has_streaming_input": False, + "auth_type": None, + "signing": {"bucket": "bucket"}, + "retries": {"attempt": 1, "invocation-id": "75d143fb-0219-439b-872c-18213d1c8d54"}, + } + + with pytest.raises(SignError) as exc_info: + _ = s3v4_rest_signer({"token": "abc", "uri": TEST_URI}, request) + + assert ( + """Failed to sign request 401: {'method': 'HEAD', 'region': 'us-west-2', 'uri': 'https://bucket/metadata/snap-8048355899640248710-1-a5c8ea2d-aa1f-48e8-89f4-1fa69db8c742.avro', 'headers': {'User-Agent': ['Botocore/1.27.59 Python/3.10.7 Darwin/21.5.0']}}""" + in str(exc_info.value) + ) diff --git a/tests/io/test_io.py b/tests/io/test_io.py new file mode 100644 index 0000000000..c4dc3d45a5 --- /dev/null +++ b/tests/io/test_io.py @@ -0,0 +1,313 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +import os +import tempfile + +import pytest + +from pyiceberg.io import ( + ARROW_FILE_IO, + PY_IO_IMPL, + _import_file_io, + _infer_file_io_from_scheme, + load_file_io, +) +from pyiceberg.io.pyarrow import PyArrowFileIO + + +def test_custom_local_input_file() -> None: + """Test initializing an InputFile implementation to read a local file""" + with tempfile.TemporaryDirectory() as tmpdirname: + file_location = os.path.join(tmpdirname, "foo.txt") + with open(file_location, "wb") as write_file: + write_file.write(b"foo") + + # Confirm that the file initially exists + assert os.path.exists(file_location) + + # Instantiate the input file + absolute_file_location = os.path.abspath(file_location) + input_file = PyArrowFileIO().new_input(location=f"{absolute_file_location}") + + # Test opening and reading the file + f = input_file.open() + data = f.read() + assert data == b"foo" + assert len(input_file) == 3 + + +def test_custom_local_output_file() -> None: + """Test initializing an OutputFile implementation to write to a local file""" + with tempfile.TemporaryDirectory() as tmpdirname: + file_location = os.path.join(tmpdirname, "foo.txt") + + # Instantiate the output file + absolute_file_location = os.path.abspath(file_location) + output_file = PyArrowFileIO().new_output(location=f"{absolute_file_location}") + + # Create the output file and write to it + f = output_file.create() + f.write(b"foo") + + # Confirm that bytes were written + with open(file_location, "rb") as f: + assert f.read() == b"foo" + + assert len(output_file) == 3 + + +def test_custom_local_output_file_with_overwrite() -> None: + """Test initializing an OutputFile implementation to overwrite a local file""" + with tempfile.TemporaryDirectory() as tmpdirname: + output_file_location = os.path.join(tmpdirname, "foo.txt") + + # Create a file in the temporary directory + with open(output_file_location, "wb") as write_file: + write_file.write(b"foo") + + # Instantiate an output file + output_file = PyArrowFileIO().new_output(location=f"{output_file_location}") + + # Confirm that a FileExistsError is raised when overwrite=False + with pytest.raises(FileExistsError): + f = output_file.create(overwrite=False) + f.write(b"foo") + + # Confirm that the file is overwritten with overwrite=True + f = output_file.create(overwrite=True) + f.write(b"bar") + with open(output_file_location, "rb") as f: + assert f.read() == b"bar" + + +def test_custom_file_exists() -> None: + """Test that the exists property returns the proper value for existing and non-existing files""" + with tempfile.TemporaryDirectory() as tmpdirname: + file_location = os.path.join(tmpdirname, "foo.txt") + with open(file_location, "wb") as f: + f.write(b"foo") + + nonexistent_file_location = os.path.join(tmpdirname, "bar.txt") + + # Confirm that the file initially exists + assert os.path.exists(file_location) + + # Get an absolute path for an existing file and a nonexistent file + absolute_file_location = os.path.abspath(file_location) + non_existent_absolute_file_location = os.path.abspath(nonexistent_file_location) + + # Create InputFile instances + input_file = PyArrowFileIO().new_input(location=f"{absolute_file_location}") + non_existent_input_file = PyArrowFileIO().new_input(location=f"{non_existent_absolute_file_location}") + + # Test opening and reading the file + assert input_file.exists() + assert not non_existent_input_file.exists() + + # Create OutputFile 
instances + file = PyArrowFileIO().new_output(location=f"{absolute_file_location}") + non_existent_file = PyArrowFileIO().new_output(location=f"{non_existent_absolute_file_location}") + + # Test opening and reading the file + assert file.exists() + assert not non_existent_file.exists() + + +def test_output_file_to_input_file() -> None: + """Test initializing an InputFile using the `to_input_file()` method on an OutputFile instance""" + with tempfile.TemporaryDirectory() as tmpdirname: + output_file_location = os.path.join(tmpdirname, "foo.txt") + + # Create an output file instance + output_file = PyArrowFileIO().new_output(location=f"{output_file_location}") + + # Create the output file and write to it + with output_file.create() as output_stream: + output_stream.write(b"foo") + + # Convert to an input file and confirm the contents + input_file = output_file.to_input_file() + with input_file.open() as f: + assert f.read() == b"foo" + + +@pytest.mark.parametrize( + "string_uri", + [ + "foo/bar/baz.parquet", + "file:/foo/bar/baz.parquet", + "file://foo/bar/baz.parquet", + ], +) +def test_custom_file_io_locations(string_uri: str) -> None: + """Test that the location property is maintained as the value of the location argument""" + # Instantiate the file-io and create a new input and output file + file_io = PyArrowFileIO() + input_file = file_io.new_input(location=string_uri) + assert input_file.location == string_uri + + output_file = file_io.new_output(location=string_uri) + assert output_file.location == string_uri + + +def test_deleting_local_file_using_file_io() -> None: + """Test deleting a local file using FileIO.delete(...)""" + with tempfile.TemporaryDirectory() as tmpdirname: + # Write to the temporary file + output_file_location = os.path.join(tmpdirname, "foo.txt") + with open(output_file_location, "wb") as f: + f.write(b"foo") + + # Instantiate the file-io + file_io = PyArrowFileIO() + + # Confirm that the file initially exists + assert os.path.exists(output_file_location) + + # Delete the file using the file-io implementation's delete method + file_io.delete(output_file_location) + + # Confirm that the file no longer exists + assert not os.path.exists(output_file_location) + + +def test_raise_file_not_found_error_for_fileio_delete() -> None: + """Test raising a FileNotFound error when trying to delete a non-existent file""" + with tempfile.TemporaryDirectory() as tmpdirname: + # Build a path to a file that is never created + output_file_location = os.path.join(tmpdirname, "foo.txt") + + # Instantiate the file-io + file_io = PyArrowFileIO() + + # Delete the non-existent file using the file-io implementation's delete method + with pytest.raises(FileNotFoundError) as exc_info: + file_io.delete(output_file_location) + + assert "Cannot delete file" in str(exc_info.value) + + # Confirm that the file no longer exists + assert not os.path.exists(output_file_location) + + +def test_deleting_local_file_using_file_io_input_file() -> None: + """Test deleting a local file by passing an InputFile instance to FileIO.delete(...)""" + with tempfile.TemporaryDirectory() as tmpdirname: + # Write to the temporary file + file_location = os.path.join(tmpdirname, "foo.txt") + with open(file_location, "wb") as f: + f.write(b"foo") + + # Instantiate the file-io + file_io = PyArrowFileIO() + + # Confirm that the file initially exists + assert os.path.exists(file_location) + + # Instantiate the custom InputFile + input_file = PyArrowFileIO().new_input(location=f"{file_location}") + + # Delete the file using the file-io
implementation's delete method + file_io.delete(input_file) + + # Confirm that the file no longer exists + assert not os.path.exists(file_location) + + +def test_deleting_local_file_using_file_io_output_file() -> None: + """Test deleting a local file by passing an OutputFile instance to FileIO.delete(...)""" + with tempfile.TemporaryDirectory() as tmpdirname: + # Write to the temporary file + file_location = os.path.join(tmpdirname, "foo.txt") + with open(file_location, "wb") as f: + f.write(b"foo") + + # Instantiate the file-io + file_io = PyArrowFileIO() + + # Confirm that the file initially exists + assert os.path.exists(file_location) + + # Instantiate the custom OutputFile + output_file = PyArrowFileIO().new_output(location=f"{file_location}") + + # Delete the file using the file-io implementation's delete method + file_io.delete(output_file) + + # Confirm that the file no longer exists + assert not os.path.exists(file_location) + + +def test_import_file_io() -> None: + assert isinstance(_import_file_io(ARROW_FILE_IO, {}), PyArrowFileIO) + + +def test_import_file_io_does_not_exist() -> None: + assert _import_file_io("pyiceberg.does.not.exist.FileIO", {}) is None + + +def test_load_file() -> None: + assert isinstance(load_file_io({PY_IO_IMPL: ARROW_FILE_IO}), PyArrowFileIO) + + +def test_load_file_io_no_arguments() -> None: + assert isinstance(load_file_io({}), PyArrowFileIO) + + +def test_load_file_io_does_not_exist() -> None: + with pytest.raises(ValueError) as exc_info: + load_file_io({PY_IO_IMPL: "pyiceberg.does.not.exist.FileIO"}) + + assert "Could not initialize FileIO: pyiceberg.does.not.exist.FileIO" in str(exc_info.value) + + +def test_load_file_io_warehouse() -> None: + assert isinstance(load_file_io({"warehouse": "s3://some-path/"}), PyArrowFileIO) + + +def test_load_file_io_location() -> None: + assert isinstance(load_file_io({"location": "s3://some-path/"}), PyArrowFileIO) + + +def test_load_file_io_location_no_schema() -> None: + assert isinstance(load_file_io({"location": "/no-schema/"}), PyArrowFileIO) + + +def test_mock_warehouse_location_file_io() -> None: + # For testing the selection logic + io = load_file_io({"warehouse": "test://some-path/"}) + assert io.properties["warehouse"] == "test://some-path/" + + +def test_mock_table_location_file_io() -> None: + # For testing the selection logic + io = load_file_io({}, "test://some-path/") + assert io.properties == {} + + +def test_gibberish_table_location_file_io() -> None: + # For testing the selection logic + assert isinstance(load_file_io({}, "gibberish"), PyArrowFileIO) + + +def test_infer_file_io_from_schema_unknown() -> None: + # When we have an unknown scheme, we would like to know + with pytest.warns(UserWarning) as w: + _infer_file_io_from_scheme("unknown://bucket/path/", {}) + + assert str(w[0].message) == "No preferred file implementation for scheme: unknown" diff --git a/tests/io/test_pyarrow.py b/tests/io/test_pyarrow.py new file mode 100644 index 0000000000..8b62212593 --- /dev/null +++ b/tests/io/test_pyarrow.py @@ -0,0 +1,1544 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=protected-access,unused-argument,redefined-outer-name + +import os +import tempfile +from typing import Any, List, Optional +from unittest.mock import MagicMock, patch +from uuid import uuid4 + +import pyarrow as pa +import pyarrow.parquet as pq +import pytest +from pyarrow.fs import FileType, LocalFileSystem + +from pyiceberg.avro.resolver import ResolveError +from pyiceberg.catalog.noop import NoopCatalog +from pyiceberg.expressions import ( + AlwaysFalse, + AlwaysTrue, + And, + BooleanExpression, + BoundEqualTo, + BoundGreaterThan, + BoundGreaterThanOrEqual, + BoundIn, + BoundIsNaN, + BoundIsNull, + BoundLessThan, + BoundLessThanOrEqual, + BoundNotEqualTo, + BoundNotIn, + BoundNotNaN, + BoundNotNull, + BoundNotStartsWith, + BoundReference, + BoundStartsWith, + GreaterThan, + Not, + Or, + literal, +) +from pyiceberg.io import InputStream, OutputStream, load_file_io +from pyiceberg.io.pyarrow import ( + PyArrowFile, + PyArrowFileIO, + _ConvertToArrowSchema, + _read_deletes, + expression_to_pyarrow, + project_table, + schema_to_pyarrow, +) +from pyiceberg.manifest import DataFile, DataFileContent, FileFormat +from pyiceberg.partitioning import PartitionSpec +from pyiceberg.schema import Schema, visit +from pyiceberg.table import FileScanTask, Table +from pyiceberg.table.metadata import TableMetadataV2 +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + StringType, + StructType, + TimestampType, + TimestamptzType, + TimeType, +) + + +def test_pyarrow_input_file() -> None: + """Test reading a file using PyArrowFile""" + + with tempfile.TemporaryDirectory() as tmpdirname: + file_location = os.path.join(tmpdirname, "foo.txt") + with open(file_location, "wb") as f: + f.write(b"foo") + + # Confirm that the file initially exists + assert os.path.exists(file_location) + + # Instantiate the input file + absolute_file_location = os.path.abspath(file_location) + input_file = PyArrowFileIO().new_input(location=f"{absolute_file_location}") + + # Test opening and reading the file + r = input_file.open(seekable=False) + assert isinstance(r, InputStream) # Test that the file object abides by the InputStream protocol + data = r.read() + assert data == b"foo" + assert len(input_file) == 3 + with pytest.raises(OSError) as exc_info: + r.seek(0, 0) + assert "only valid on seekable files" in str(exc_info.value) + + +def test_pyarrow_input_file_seekable() -> None: + """Test reading a file using PyArrowFile""" + + with tempfile.TemporaryDirectory() as tmpdirname: + file_location = os.path.join(tmpdirname, "foo.txt") + with open(file_location, "wb") as f: + f.write(b"foo") + + # Confirm that the file initially exists + assert os.path.exists(file_location) + + # Instantiate the input file + absolute_file_location = os.path.abspath(file_location) + input_file = PyArrowFileIO().new_input(location=f"{absolute_file_location}") + + # Test opening and reading the file + r = input_file.open(seekable=True) + assert isinstance(r, InputStream) # Test that the 
file object abides by the InputStream protocol + data = r.read() + assert data == b"foo" + assert len(input_file) == 3 + r.seek(0, 0) + data = r.read() + assert data == b"foo" + assert len(input_file) == 3 + + +def test_pyarrow_output_file() -> None: + """Test writing a file using PyArrowFile""" + + with tempfile.TemporaryDirectory() as tmpdirname: + file_location = os.path.join(tmpdirname, "foo.txt") + + # Instantiate the output file + absolute_file_location = os.path.abspath(file_location) + output_file = PyArrowFileIO().new_output(location=f"{absolute_file_location}") + + # Create the output file and write to it + f = output_file.create() + assert isinstance(f, OutputStream) # Test that the file object abides by the OutputStream protocol + f.write(b"foo") + + # Confirm that bytes were written + with open(file_location, "rb") as f: + assert f.read() == b"foo" + + assert len(output_file) == 3 + + +def test_pyarrow_invalid_scheme() -> None: + """Test that a ValueError is raised if a location is provided with an invalid scheme""" + + with pytest.raises(ValueError) as exc_info: + PyArrowFileIO().new_input("foo://bar/baz.txt") + + assert "Unrecognized filesystem type in URI" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + PyArrowFileIO().new_output("foo://bar/baz.txt") + + assert "Unrecognized filesystem type in URI" in str(exc_info.value) + + +def test_pyarrow_violating_input_stream_protocol() -> None: + """Test that a file object that violates the InputStream protocol is not recognized as an InputStream""" + + # Missing seek, tell, closed, and close + input_file_mock = MagicMock(spec=["read"]) + + # Create a mocked filesystem that returns input_file_mock + filesystem_mock = MagicMock() + filesystem_mock.open_input_file.return_value = input_file_mock + + input_file = PyArrowFile("foo.txt", path="foo.txt", fs=filesystem_mock) + + f = input_file.open() + assert not isinstance(f, InputStream) + + +def test_pyarrow_violating_output_stream_protocol() -> None: + """Test that a file object that violates the OutputStream protocol is not recognized as an OutputStream""" + + # Missing closed, and close + output_file_mock = MagicMock(spec=["write", "exists"]) + output_file_mock.exists.return_value = False + + file_info_mock = MagicMock() + file_info_mock.type = FileType.NotFound + + # Create a mocked filesystem that returns output_file_mock + filesystem_mock = MagicMock() + filesystem_mock.open_output_stream.return_value = output_file_mock + filesystem_mock.get_file_info.return_value = file_info_mock + + output_file = PyArrowFile("foo.txt", path="foo.txt", fs=filesystem_mock) + + f = output_file.create() + + assert not isinstance(f, OutputStream) + + +def test_raise_on_opening_a_local_file_not_found() -> None: + """Test that a PyArrowFile raises appropriately when a local file is not found""" + + with tempfile.TemporaryDirectory() as tmpdirname: + file_location = os.path.join(tmpdirname, "foo.txt") + f = PyArrowFileIO().new_input(file_location) + + with pytest.raises(FileNotFoundError) as exc_info: + f.open() + + assert "[Errno 2] Failed to open local file" in str(exc_info.value) + + +def test_raise_on_opening_an_s3_file_no_permission() -> None: + """Test that opening a PyArrowFile raises a PermissionError when the pyarrow error includes 'AWS Error [code 15]'""" + + s3fs_mock = MagicMock() + s3fs_mock.open_input_file.side_effect = OSError("AWS Error [code 15]") + + f = PyArrowFile("s3://foo/bar.txt", path="foo/bar.txt", fs=s3fs_mock) + + with pytest.raises(PermissionError)
as exc_info: + f.open() + + assert "Cannot open file, access denied:" in str(exc_info.value) + + +def test_raise_on_opening_an_s3_file_not_found() -> None: + """Test that a PyArrowFile raises a FileNotFoundError when the pyarrow error includes 'Path does not exist'""" + + s3fs_mock = MagicMock() + s3fs_mock.open_input_file.side_effect = OSError("Path does not exist") + + f = PyArrowFile("s3://foo/bar.txt", path="foo/bar.txt", fs=s3fs_mock) + + with pytest.raises(FileNotFoundError) as exc_info: + f.open() + + assert "Cannot open file, does not exist:" in str(exc_info.value) + + +@patch("pyiceberg.io.pyarrow.PyArrowFile.exists", return_value=False) +def test_raise_on_creating_an_s3_file_no_permission(_: Any) -> None: + """Test that creating a PyArrowFile raises a PermissionError when the pyarrow error includes 'AWS Error [code 15]'""" + + s3fs_mock = MagicMock() + s3fs_mock.open_output_stream.side_effect = OSError("AWS Error [code 15]") + + f = PyArrowFile("s3://foo/bar.txt", path="foo/bar.txt", fs=s3fs_mock) + + with pytest.raises(PermissionError) as exc_info: + f.create() + + assert "Cannot create file, access denied:" in str(exc_info.value) + + +def test_deleting_s3_file_no_permission() -> None: + """Test that a PyArrowFile raises a PermissionError when the pyarrow OSError includes 'AWS Error [code 15]'""" + + s3fs_mock = MagicMock() + s3fs_mock.delete_file.side_effect = OSError("AWS Error [code 15]") + + with patch.object(PyArrowFileIO, "_initialize_fs") as submocked: + submocked.return_value = s3fs_mock + + with pytest.raises(PermissionError) as exc_info: + PyArrowFileIO().delete("s3://foo/bar.txt") + + assert "Cannot delete file, access denied:" in str(exc_info.value) + + +def test_deleting_s3_file_not_found() -> None: + """Test that a PyArrowFile raises a FileNotFoundError when the pyarrow error includes 'Path does not exist'""" + + s3fs_mock = MagicMock() + s3fs_mock.delete_file.side_effect = OSError("Path does not exist") + + with patch.object(PyArrowFileIO, "_initialize_fs") as submocked: + submocked.return_value = s3fs_mock + + with pytest.raises(FileNotFoundError) as exc_info: + PyArrowFileIO().delete("s3://foo/bar.txt") + + assert "Cannot delete file, does not exist:" in str(exc_info.value) + + +def test_deleting_hdfs_file_not_found() -> None: + """Test that a PyArrowFile raises a FileNotFoundError when the pyarrow error includes 'Path does not exist'""" + + hdfs_mock = MagicMock() + hdfs_mock.delete_file.side_effect = OSError("Path does not exist") + + with patch.object(PyArrowFileIO, "_initialize_fs") as submocked: + submocked.return_value = hdfs_mock + + with pytest.raises(FileNotFoundError) as exc_info: + PyArrowFileIO().delete("hdfs://foo/bar.txt") + + assert "Cannot delete file, does not exist:" in str(exc_info.value) + + +def test_schema_to_pyarrow_schema(table_schema_nested: Schema) -> None: + actual = schema_to_pyarrow(table_schema_nested) + expected = """foo: string + -- field metadata -- + field_id: '1' +bar: int32 not null + -- field metadata -- + field_id: '2' +baz: bool + -- field metadata -- + field_id: '3' +qux: list<element: string not null> not null + child 0, element: string not null + -- field metadata -- + field_id: '5' + -- field metadata -- + field_id: '4' +quux: map<string, map<string, int32>> not null + child 0, entries: struct<key: string not null, value: map<string, int32> not null> not null + child 0, key: string not null + -- field metadata -- + field_id: '7' + child 1, value: map<string, int32> not null + child 0, entries: struct<key: string not null, value: int32 not null> not null + child 0, key: string not null + -- field metadata -- + field_id: '9' + child 1, value: int32 not null + -- field metadata -- + field_id: '10' + -- field metadata -- + field_id: '8' + -- field metadata -- + field_id: '6' +location: list<element: struct<latitude: float, longitude: float> not null> not null + child 0, element: struct<latitude: float, longitude: float> not null + child 0, latitude: float + -- field metadata -- + field_id: '13' + child 1, longitude: float + -- field metadata -- + field_id: '14' + -- field metadata -- + field_id: '12' + -- field metadata -- + field_id: '11' +person: struct<name: string, age: int32 not null> + child 0, name: string + -- field metadata -- + field_id: '16' + child 1, age: int32 not null + -- field metadata -- + field_id: '17' + -- field metadata -- + field_id: '15'""" + assert repr(actual) == expected + + +def test_fixed_type_to_pyarrow() -> None: + length = 22 + iceberg_type = FixedType(length) + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.binary(length) + + +def test_decimal_type_to_pyarrow() -> None: + precision = 25 + scale = 19 + iceberg_type = DecimalType(precision, scale) + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.decimal128(precision, scale) + + +def test_boolean_type_to_pyarrow() -> None: + iceberg_type = BooleanType() + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.bool_() + + +def test_integer_type_to_pyarrow() -> None: + iceberg_type = IntegerType() + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.int32() + + +def test_long_type_to_pyarrow() -> None: + iceberg_type = LongType() + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.int64() + + +def test_float_type_to_pyarrow() -> None: + iceberg_type = FloatType() + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.float32() + + +def test_double_type_to_pyarrow() -> None: + iceberg_type = DoubleType() + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.float64() + + +def test_date_type_to_pyarrow() -> None: + iceberg_type = DateType() + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.date32() + + +def test_time_type_to_pyarrow() -> None: + iceberg_type = TimeType() + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.time64("us") + + +def test_timestamp_type_to_pyarrow() -> None: + iceberg_type = TimestampType() + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.timestamp(unit="us") + + +def test_timestamptz_type_to_pyarrow() -> None: + iceberg_type = TimestamptzType() + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.timestamp(unit="us", tz="UTC") + + +def test_string_type_to_pyarrow() -> None: + iceberg_type = StringType() + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.string() + + +def test_binary_type_to_pyarrow() -> None: + iceberg_type = BinaryType() + assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.binary() + + +def test_struct_type_to_pyarrow(table_schema_simple: Schema) -> None: + expected = pa.struct( + [ + pa.field("foo", pa.string(), nullable=True, metadata={"field_id": "1"}), + pa.field("bar", pa.int32(), nullable=False, metadata={"field_id": "2"}), + pa.field("baz", pa.bool_(), nullable=True, metadata={"field_id": "3"}), + ] + ) + assert visit(table_schema_simple.as_struct(), _ConvertToArrowSchema()) == expected + + +def test_map_type_to_pyarrow() -> None: + iceberg_map = MapType( + key_id=1, + key_type=IntegerType(), + value_id=2, + value_type=StringType(), + value_required=True, + ) + assert visit(iceberg_map, _ConvertToArrowSchema()) == pa.map_( + pa.field("key", pa.int32(), nullable=False, metadata={"field_id": "1"}), + pa.field("value", pa.string(), nullable=False, metadata={"field_id": "2"}), + )
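+ + +# The "field_id" metadata that schema_to_pyarrow attaches to every field is what ties a +# parquet column back to its Iceberg field: the projection tests below match columns by +# field ID rather than by name.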
+ + +def test_list_type_to_pyarrow() -> None: + iceberg_map = ListType( + element_id=1, + element_type=IntegerType(), + element_required=True, + ) + assert visit(iceberg_map, _ConvertToArrowSchema()) == pa.list_( + pa.field("element", pa.int32(), nullable=False, metadata={"field_id": "1"}) + ) + + +@pytest.fixture +def bound_reference(table_schema_simple: Schema) -> BoundReference[str]: + return BoundReference(table_schema_simple.find_field(1), table_schema_simple.accessor_for_field(1)) + + +@pytest.fixture +def bound_double_reference() -> BoundReference[float]: + schema = Schema( + NestedField(field_id=1, name="foo", field_type=DoubleType(), required=False), + schema_id=1, + identifier_field_ids=[], + ) + return BoundReference(schema.find_field(1), schema.accessor_for_field(1)) + + +def test_expr_is_null_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert ( + repr(expression_to_pyarrow(BoundIsNull(bound_reference))) + == "<pyarrow.compute.Expression is_null(foo, {nan_is_null=false})>" + ) + + +def test_expr_not_null_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert repr(expression_to_pyarrow(BoundNotNull(bound_reference))) == "<pyarrow.compute.Expression is_valid(foo)>" + + +def test_expr_is_nan_to_pyarrow(bound_double_reference: BoundReference[float]) -> None: + assert repr(expression_to_pyarrow(BoundIsNaN(bound_double_reference))) == "<pyarrow.compute.Expression is_nan(foo)>" + + +def test_expr_not_nan_to_pyarrow(bound_double_reference: BoundReference[float]) -> None: + assert repr(expression_to_pyarrow(BoundNotNaN(bound_double_reference))) == "<pyarrow.compute.Expression invert(is_nan(foo))>" + + +def test_expr_equal_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert ( + repr(expression_to_pyarrow(BoundEqualTo(bound_reference, literal("hello")))) + == '<pyarrow.compute.Expression (foo == "hello")>' + ) + + +def test_expr_not_equal_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert ( + repr(expression_to_pyarrow(BoundNotEqualTo(bound_reference, literal("hello")))) + == '<pyarrow.compute.Expression (foo != "hello")>' + ) + + +def test_expr_greater_than_or_equal_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert ( + repr(expression_to_pyarrow(BoundGreaterThanOrEqual(bound_reference, literal("hello")))) + == '<pyarrow.compute.Expression (foo >= "hello")>' + ) + + +def test_expr_greater_than_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert ( + repr(expression_to_pyarrow(BoundGreaterThan(bound_reference, literal("hello")))) + == '<pyarrow.compute.Expression (foo > "hello")>' + ) + + +def test_expr_less_than_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert ( + repr(expression_to_pyarrow(BoundLessThan(bound_reference, literal("hello")))) + == '<pyarrow.compute.Expression (foo < "hello")>' + ) + + +def test_expr_less_than_or_equal_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert ( + repr(expression_to_pyarrow(BoundLessThanOrEqual(bound_reference, literal("hello")))) + == '<pyarrow.compute.Expression (foo <= "hello")>' + ) + + +def test_expr_in_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert repr(expression_to_pyarrow(BoundIn(bound_reference, {literal("hello"), literal("world")}))) in ( + """<pyarrow.compute.Expression is_in(foo, {value_set=string:[ + "hello", + "world" +], skip_nulls=false})>""", + """<pyarrow.compute.Expression is_in(foo, {value_set=string:[ + "world", + "hello" +], skip_nulls=false})>""", + ) + + +def test_expr_not_in_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert repr(expression_to_pyarrow(BoundNotIn(bound_reference, {literal("hello"), literal("world")}))) in ( + """<pyarrow.compute.Expression invert(is_in(foo, {value_set=string:[ + "hello", + "world" +], skip_nulls=false}))>""", + """<pyarrow.compute.Expression invert(is_in(foo, {value_set=string:[ + "world", + "hello" +], skip_nulls=false}))>""", + ) + + +def test_expr_starts_with_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert ( + repr(expression_to_pyarrow(BoundStartsWith(bound_reference, literal("he")))) + == '<pyarrow.compute.Expression starts_with(foo, {pattern="he", ignore_case=false})>' + ) + + +def test_expr_not_starts_with_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert ( + repr(expression_to_pyarrow(BoundNotStartsWith(bound_reference, literal("he")))) + == '<pyarrow.compute.Expression invert(starts_with(foo, {pattern="he", ignore_case=false}))>' + ) + + +def test_and_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert ( + repr(expression_to_pyarrow(And(BoundEqualTo(bound_reference, literal("hello")), BoundIsNull(bound_reference)))) + == '<pyarrow.compute.Expression ((foo == "hello") and is_null(foo, {nan_is_null=false}))>' + ) + + +def test_or_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert ( + repr(expression_to_pyarrow(Or(BoundEqualTo(bound_reference, literal("hello")), BoundIsNull(bound_reference)))) + == '<pyarrow.compute.Expression ((foo == "hello") or is_null(foo, {nan_is_null=false}))>' + ) + + +def test_not_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert ( + repr(expression_to_pyarrow(Not(BoundEqualTo(bound_reference, literal("hello"))))) + == '<pyarrow.compute.Expression invert((foo == "hello"))>' + ) + + +def test_always_true_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert repr(expression_to_pyarrow(AlwaysTrue())) == "<pyarrow.compute.Expression true>" + + +def test_always_false_to_pyarrow(bound_reference: BoundReference[str]) -> None: + assert repr(expression_to_pyarrow(AlwaysFalse())) == "<pyarrow.compute.Expression false>" + + +@pytest.fixture +def schema_int() -> Schema: + return Schema(NestedField(1, "id", IntegerType(), required=False)) + + +@pytest.fixture +def schema_int_str() -> Schema: + return Schema(NestedField(1, "id", IntegerType(), required=False), NestedField(2, "data", StringType(), required=False)) + + +@pytest.fixture +def schema_str() -> Schema: + return Schema(NestedField(2, "data", StringType(), required=False)) + + +@pytest.fixture +def schema_long() -> Schema: + return Schema(NestedField(3, "id", LongType(), required=False)) + + +@pytest.fixture +def schema_struct() -> Schema: + return Schema( + NestedField( + 4, + "location", + StructType( + NestedField(41, "lat", DoubleType()), + NestedField(42, "long", DoubleType()), + ), + ) + ) + + +@pytest.fixture +def schema_list() -> Schema: + return Schema( + NestedField(5, "ids", ListType(51, IntegerType(), element_required=False), required=False), + ) + + +@pytest.fixture +def schema_list_of_structs() -> Schema: + return Schema( + NestedField( + 5, + "locations", + ListType( + 51, + StructType(NestedField(511, "lat", DoubleType()), NestedField(512, "long", DoubleType())), + element_required=False, + ), + required=False, + ), + ) + + +@pytest.fixture +def schema_map() -> Schema: + return Schema( + NestedField( + 5, + "properties", + MapType( + key_id=51, + key_type=StringType(), + value_id=52, + value_type=StringType(), + value_required=True, + ), + required=False, + ), + ) + + +def _write_table_to_file(filepath: str, schema: pa.Schema, table: pa.Table) -> str: + with pq.ParquetWriter(filepath, schema) as writer: + writer.write_table(table) + return filepath + + +@pytest.fixture +def file_int(schema_int: Schema, tmpdir: str) -> str: + pyarrow_schema = pa.schema(schema_to_pyarrow(schema_int), metadata={"iceberg.schema": schema_int.model_dump_json()}) + return _write_table_to_file( + f"file:{tmpdir}/a.parquet", pyarrow_schema, pa.Table.from_arrays([pa.array([0, 1, 2])], schema=pyarrow_schema) + ) + + +@pytest.fixture +def file_int_str(schema_int_str: Schema, tmpdir: str) -> str: + pyarrow_schema = pa.schema(schema_to_pyarrow(schema_int_str), metadata={"iceberg.schema": schema_int_str.model_dump_json()}) + return _write_table_to_file( + f"file:{tmpdir}/a.parquet", + pyarrow_schema, + pa.Table.from_arrays([pa.array([0, 1, 2]), pa.array(["0", "1", "2"])], schema=pyarrow_schema), + ) + + +@pytest.fixture +def file_string(schema_str: Schema, tmpdir: str) -> str: + pyarrow_schema = pa.schema(schema_to_pyarrow(schema_str), metadata={"iceberg.schema": schema_str.model_dump_json()}) + return _write_table_to_file( + f"file:{tmpdir}/b.parquet", pyarrow_schema, pa.Table.from_arrays([pa.array(["0", "1", "2"])], schema=pyarrow_schema) + ) + + +@pytest.fixture +def
file_long(schema_long: Schema, tmpdir: str) -> str: + pyarrow_schema = pa.schema(schema_to_pyarrow(schema_long), metadata={"iceberg.schema": schema_long.model_dump_json()}) + return _write_table_to_file( + f"file:{tmpdir}/c.parquet", pyarrow_schema, pa.Table.from_arrays([pa.array([0, 1, 2])], schema=pyarrow_schema) + ) + + +@pytest.fixture +def file_struct(schema_struct: Schema, tmpdir: str) -> str: + pyarrow_schema = pa.schema(schema_to_pyarrow(schema_struct), metadata={"iceberg.schema": schema_struct.model_dump_json()}) + return _write_table_to_file( + f"file:{tmpdir}/d.parquet", + pyarrow_schema, + pa.Table.from_pylist( + [ + {"location": {"lat": 52.371807, "long": 4.896029}}, + {"location": {"lat": 52.387386, "long": 4.646219}}, + {"location": {"lat": 52.078663, "long": 4.288788}}, + ], + schema=pyarrow_schema, + ), + ) + + +@pytest.fixture +def file_list(schema_list: Schema, tmpdir: str) -> str: + pyarrow_schema = pa.schema(schema_to_pyarrow(schema_list), metadata={"iceberg.schema": schema_list.model_dump_json()}) + return _write_table_to_file( + f"file:{tmpdir}/e.parquet", + pyarrow_schema, + pa.Table.from_pylist( + [ + {"ids": list(range(1, 10))}, + {"ids": list(range(2, 20))}, + {"ids": list(range(3, 30))}, + ], + schema=pyarrow_schema, + ), + ) + + +@pytest.fixture +def file_list_of_structs(schema_list_of_structs: Schema, tmpdir: str) -> str: + pyarrow_schema = pa.schema( + schema_to_pyarrow(schema_list_of_structs), metadata={"iceberg.schema": schema_list_of_structs.model_dump_json()} + ) + return _write_table_to_file( + f"file:{tmpdir}/e.parquet", + pyarrow_schema, + pa.Table.from_pylist( + [ + {"locations": [{"lat": 52.371807, "long": 4.896029}, {"lat": 52.387386, "long": 4.646219}]}, + {"locations": []}, + {"locations": [{"lat": 52.078663, "long": 4.288788}, {"lat": 52.387386, "long": 4.646219}]}, + ], + schema=pyarrow_schema, + ), + ) + + +@pytest.fixture +def file_map(schema_map: Schema, tmpdir: str) -> str: + pyarrow_schema = pa.schema(schema_to_pyarrow(schema_map), metadata={"iceberg.schema": schema_map.model_dump_json()}) + return _write_table_to_file( + f"file:{tmpdir}/e.parquet", + pyarrow_schema, + pa.Table.from_pylist( + [ + {"properties": [("a", "b")]}, + {"properties": [("c", "d")]}, + {"properties": [("e", "f"), ("g", "h")]}, + ], + schema=pyarrow_schema, + ), + ) + + +def project( + schema: Schema, files: List[str], expr: Optional[BooleanExpression] = None, table_schema: Optional[Schema] = None +) -> pa.Table: + return project_table( + [ + FileScanTask( + DataFile( + content=DataFileContent.DATA, + file_path=file, + file_format=FileFormat.PARQUET, + partition={}, + record_count=3, + file_size_in_bytes=3, + ) + ) + for file in files + ], + Table( + ("namespace", "table"), + metadata=TableMetadataV2( + location="file://a/b/", + last_column_id=1, + format_version=2, + schemas=[table_schema or schema], + partition_specs=[PartitionSpec()], + ), + metadata_location="file://a/b/c.json", + io=PyArrowFileIO(), + catalog=NoopCatalog("NoopCatalog"), + ), + expr or AlwaysTrue(), + schema, + case_sensitive=True, + ) + + +def test_projection_add_column(file_int: str) -> None: + schema = Schema( + # All new IDs + NestedField(10, "id", IntegerType(), required=False), + NestedField(20, "list", ListType(21, IntegerType(), element_required=False), required=False), + NestedField( + 30, + "map", + MapType(key_id=31, key_type=IntegerType(), value_id=32, value_type=StringType(), value_required=False), + required=False, + ), + NestedField( + 40, + "location", + StructType( + 
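+                # the nested ids are new as well, so every value in the projected column will be null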
NestedField(41, "lat", DoubleType(), required=False), NestedField(42, "lon", DoubleType(), required=False)
+            ),
+            required=False,
+        ),
+    )
+    result_table = project(schema, [file_int])
+
+    for col in result_table.columns:
+        assert len(col) == 3
+
+    for actual, expected in zip(result_table.columns[0], [None, None, None]):
+        assert actual.as_py() == expected
+
+    for actual, expected in zip(result_table.columns[1], [None, None, None]):
+        assert actual.as_py() == expected
+
+    for actual, expected in zip(result_table.columns[2], [None, None, None]):
+        assert actual.as_py() == expected
+
+    for actual, expected in zip(result_table.columns[3], [None, None, None]):
+        assert actual.as_py() == expected
+    assert (
+        repr(result_table.schema)
+        == """id: int32
+list: list<element: int32>
+  child 0, element: int32
+    -- field metadata --
+    field_id: '21'
+map: map<int32, string>
+  child 0, entries: struct<key: int32 not null, value: string> not null
+      child 0, key: int32 not null
+      -- field metadata --
+      field_id: '31'
+      child 1, value: string
+      -- field metadata --
+      field_id: '32'
+location: struct<lat: double, lon: double>
+  child 0, lat: double
+    -- field metadata --
+    field_id: '41'
+  child 1, lon: double
+    -- field metadata --
+    field_id: '42'"""
+    )
+
+
+def test_read_list(schema_list: Schema, file_list: str) -> None:
+    result_table = project(schema_list, [file_list])
+
+    assert len(result_table.columns[0]) == 3
+    for actual, expected in zip(result_table.columns[0], [list(range(1, 10)), list(range(2, 20)), list(range(3, 30))]):
+        assert actual.as_py() == expected
+
+    assert repr(result_table.schema) == "ids: list<item: int32>\n  child 0, item: int32"
+
+
+def test_read_map(schema_map: Schema, file_map: str) -> None:
+    result_table = project(schema_map, [file_map])
+
+    assert len(result_table.columns[0]) == 3
+    for actual, expected in zip(result_table.columns[0], [[("a", "b")], [("c", "d")], [("e", "f"), ("g", "h")]]):
+        assert actual.as_py() == expected
+
+    assert (
+        repr(result_table.schema)
+        == """properties: map<string, string>
+  child 0, entries: struct<key: string not null, value: string> not null
+      child 0, key: string not null
+      child 1, value: string"""
+    )
+
+
+def test_projection_add_column_struct(schema_int: Schema, file_int: str) -> None:
+    schema = Schema(
+        # A new ID
+        NestedField(
+            2,
+            "id",
+            MapType(key_id=3, key_type=IntegerType(), value_id=4, value_type=StringType(), value_required=False),
+            required=False,
+        )
+    )
+    result_table = project(schema, [file_int])
+    # Everything should be None
+    for r in result_table.columns[0]:
+        assert r.as_py() is None
+    assert (
+        repr(result_table.schema)
+        == """id: map<int32, string>
+  child 0, entries: struct<key: int32 not null, value: string> not null
+      child 0, key: int32 not null
+      -- field metadata --
+      field_id: '3'
+      child 1, value: string
+      -- field metadata --
+      field_id: '4'"""
+    )
+
+
+def test_projection_add_column_struct_required(file_int: str) -> None:
+    schema = Schema(
+        # A new ID
+        NestedField(
+            2,
+            "other_id",
+            IntegerType(),
+            required=True,
+        )
+    )
+    with pytest.raises(ResolveError) as exc_info:
+        _ = project(schema, [file_int])
+    assert "Field is required, and could not be found in the file: 2: other_id: required int" in str(exc_info.value)
+
+
+def test_projection_rename_column(schema_int: Schema, file_int: str) -> None:
+    schema = Schema(
+        # Reuses the id 1
+        NestedField(1, "other_name", IntegerType())
+    )
+    result_table = project(schema, [file_int])
+    assert len(result_table.columns[0]) == 3
+    for actual, expected in zip(result_table.columns[0], [0, 1, 2]):
+        assert actual.as_py() == expected
+
+    assert repr(result_table.schema) == "other_name: int32 not null"
+
+
+def test_projection_concat_files(schema_int: Schema,
file_int: str) -> None: + result_table = project(schema_int, [file_int, file_int]) + + for actual, expected in zip(result_table.columns[0], [0, 1, 2, 0, 1, 2]): + assert actual.as_py() == expected + assert len(result_table.columns[0]) == 6 + assert repr(result_table.schema) == "id: int32" + + +def test_projection_filter(schema_int: Schema, file_int: str) -> None: + result_table = project(schema_int, [file_int], GreaterThan("id", 4)) + assert len(result_table.columns[0]) == 0 + assert ( + repr(result_table.schema) + == """id: int32 + -- field metadata -- + field_id: '1'""" + ) + + +def test_projection_filter_renamed_column(file_int: str) -> None: + schema = Schema( + # Reuses the id 1 + NestedField(1, "other_id", IntegerType()) + ) + result_table = project(schema, [file_int], GreaterThan("other_id", 1)) + assert len(result_table.columns[0]) == 1 + assert repr(result_table.schema) == "other_id: int32 not null" + + +def test_projection_filter_add_column(schema_int: Schema, file_int: str, file_string: str) -> None: + """We have one file that has the column, and the other one doesn't""" + result_table = project(schema_int, [file_int, file_string]) + + for actual, expected in zip(result_table.columns[0], [0, 1, 2, None, None, None]): + assert actual.as_py() == expected + assert len(result_table.columns[0]) == 6 + assert repr(result_table.schema) == "id: int32" + + +def test_projection_filter_add_column_promote(file_int: str) -> None: + schema_long = Schema(NestedField(1, "id", LongType())) + result_table = project(schema_long, [file_int]) + + for actual, expected in zip(result_table.columns[0], [0, 1, 2]): + assert actual.as_py() == expected + assert len(result_table.columns[0]) == 3 + assert repr(result_table.schema) == "id: int64 not null" + + +def test_projection_filter_add_column_demote(file_long: str) -> None: + schema_int = Schema(NestedField(3, "id", IntegerType())) + with pytest.raises(ResolveError) as exc_info: + _ = project(schema_int, [file_long]) + assert "Cannot promote long to int" in str(exc_info.value) + + +def test_projection_nested_struct_subset(file_struct: str) -> None: + schema = Schema( + NestedField( + 4, + "location", + StructType( + NestedField(41, "lat", DoubleType()), + # long is missing! 
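+                # only the requested subfields survive the projection; "long" is dropped from the result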
+            ),
+        )
+    )
+
+    result_table = project(schema, [file_struct])
+
+    for actual, expected in zip(result_table.columns[0], [52.371807, 52.387386, 52.078663]):
+        assert actual.as_py() == {"lat": expected}
+
+    assert len(result_table.columns[0]) == 3
+    assert repr(result_table.schema) == "location: struct<lat: double not null> not null\n  child 0, lat: double not null"
+
+
+def test_projection_nested_new_field(file_struct: str) -> None:
+    schema = Schema(
+        NestedField(
+            4,
+            "location",
+            StructType(
+                NestedField(43, "null", DoubleType(), required=False),  # Whoa, this column doesn't exist in the file
+            ),
+        )
+    )
+
+    result_table = project(schema, [file_struct])
+
+    for actual, expected in zip(result_table.columns[0], [None, None, None]):
+        assert actual.as_py() == {"null": expected}
+    assert len(result_table.columns[0]) == 3
+    assert repr(result_table.schema) == "location: struct<null: double> not null\n  child 0, null: double"
+
+
+def test_projection_nested_struct(schema_struct: Schema, file_struct: str) -> None:
+    schema = Schema(
+        NestedField(
+            4,
+            "location",
+            StructType(
+                NestedField(41, "lat", DoubleType(), required=False),
+                NestedField(43, "null", DoubleType(), required=False),
+                NestedField(42, "long", DoubleType(), required=False),
+            ),
+        )
+    )
+
+    result_table = project(schema, [file_struct])
+    for actual, expected in zip(
+        result_table.columns[0],
+        [
+            {"lat": 52.371807, "long": 4.896029, "null": None},
+            {"lat": 52.387386, "long": 4.646219, "null": None},
+            {"lat": 52.078663, "long": 4.288788, "null": None},
+        ],
+    ):
+        assert actual.as_py() == expected
+    assert len(result_table.columns[0]) == 3
+    assert (
+        repr(result_table.schema)
+        == "location: struct<lat: double, null: double, long: double> not null\n  child 0, lat: double\n  child 1, null: double\n  child 2, long: double"
+    )
+
+
+def test_projection_list_of_structs(schema_list_of_structs: Schema, file_list_of_structs: str) -> None:
+    schema = Schema(
+        NestedField(
+            5,
+            "locations",
+            ListType(
+                51,
+                StructType(
+                    NestedField(511, "latitude", DoubleType()),
+                    NestedField(512, "longitude", DoubleType()),
+                    NestedField(513, "altitude", DoubleType(), required=False),
+                ),
+                element_required=False,
+            ),
+            required=False,
+        ),
+    )
+
+    result_table = project(schema, [file_list_of_structs])
+    assert len(result_table.columns) == 1
+    assert len(result_table.columns[0]) == 3
+    for actual, expected in zip(
+        result_table.columns[0],
+        [
+            [
+                {"latitude": 52.371807, "longitude": 4.896029, "altitude": None},
+                {"latitude": 52.387386, "longitude": 4.646219, "altitude": None},
+            ],
+            [],
+            [
+                {"latitude": 52.078663, "longitude": 4.288788, "altitude": None},
+                {"latitude": 52.387386, "longitude": 4.646219, "altitude": None},
+            ],
+        ],
+    ):
+        assert actual.as_py() == expected
+    assert (
+        repr(result_table.schema)
+        == """locations: list<item: struct<latitude: double not null, longitude: double not null, altitude: double>>
+  child 0, item: struct<latitude: double not null, longitude: double not null, altitude: double>
+      child 0, latitude: double not null
+      child 1, longitude: double not null
+      child 2, altitude: double"""
+    )
+
+
+def test_projection_nested_struct_different_parent_id(file_struct: str) -> None:
+    schema = Schema(
+        NestedField(
+            5,  # 😱 this is 4 in the file, this will be fixed when projecting the file schema
+            "location",
+            StructType(
+                NestedField(41, "lat", DoubleType(), required=False), NestedField(42, "long", DoubleType(), required=False)
+            ),
+            required=False,
+        )
+    )
+
+    result_table = project(schema, [file_struct])
+    for actual, expected in zip(result_table.columns[0], [None, None, None]):
+        assert actual.as_py() == expected
+    assert len(result_table.columns[0]) == 3
+    assert (
+        repr(result_table.schema)
+        == """location: struct<lat: double, long: double>
child 0, lat: double + -- field metadata -- + field_id: '41' + child 1, long: double + -- field metadata -- + field_id: '42'""" + ) + + +def test_projection_filter_on_unprojected_field(schema_int_str: Schema, file_int_str: str) -> None: + schema = Schema(NestedField(1, "id", IntegerType())) + + result_table = project(schema, [file_int_str], GreaterThan("data", "1"), schema_int_str) + + for actual, expected in zip( + result_table.columns[0], + [2], + ): + assert actual.as_py() == expected + assert len(result_table.columns[0]) == 1 + assert repr(result_table.schema) == "id: int32 not null" + + +def test_projection_filter_on_unknown_field(schema_int_str: Schema, file_int_str: str) -> None: + schema = Schema(NestedField(1, "id", IntegerType())) + + with pytest.raises(ValueError) as exc_info: + _ = project(schema, [file_int_str], GreaterThan("unknown_field", "1"), schema_int_str) + + assert "Could not find field with name unknown_field, case_sensitive=True" in str(exc_info.value) + + +@pytest.fixture +def deletes_file(tmp_path: str, example_task: FileScanTask) -> str: + path = example_task.file.file_path + table = pa.table({"file_path": [path, path, path], "pos": [1, 3, 5]}) + + deletes_file_path = f"{tmp_path}/deletes.parquet" + pq.write_table(table, deletes_file_path) + + return deletes_file_path + + +def test_read_deletes(deletes_file: str, example_task: FileScanTask) -> None: + deletes = _read_deletes(LocalFileSystem(), DataFile(file_path=deletes_file, file_format=FileFormat.PARQUET)) + assert set(deletes.keys()) == {example_task.file.file_path} + assert list(deletes.values())[0] == pa.chunked_array([[1, 3, 5]]) + + +def test_delete(deletes_file: str, example_task: FileScanTask, table_schema_simple: Schema) -> None: + metadata_location = "file://a/b/c.json" + example_task_with_delete = FileScanTask( + data_file=example_task.file, + delete_files={DataFile(content=DataFileContent.POSITION_DELETES, file_path=deletes_file, file_format=FileFormat.PARQUET)}, + ) + + with_deletes = project_table( + tasks=[example_task_with_delete], + table=Table( + ("namespace", "table"), + metadata=TableMetadataV2( + location=metadata_location, + last_column_id=1, + format_version=2, + current_schema_id=1, + schemas=[table_schema_simple], + partition_specs=[PartitionSpec()], + ), + metadata_location=metadata_location, + io=load_file_io(), + catalog=NoopCatalog("noop"), + ), + row_filter=AlwaysTrue(), + projected_schema=table_schema_simple, + ) + + assert ( + str(with_deletes) + == """pyarrow.Table +foo: string +bar: int64 not null +baz: bool +---- +foo: [["a","c"]] +bar: [[1,3]] +baz: [[true,null]]""" + ) + + +def test_delete_duplicates(deletes_file: str, example_task: FileScanTask, table_schema_simple: Schema) -> None: + metadata_location = "file://a/b/c.json" + example_task_with_delete = FileScanTask( + data_file=example_task.file, + delete_files={ + DataFile(content=DataFileContent.POSITION_DELETES, file_path=deletes_file, file_format=FileFormat.PARQUET), + DataFile(content=DataFileContent.POSITION_DELETES, file_path=deletes_file, file_format=FileFormat.PARQUET), + }, + ) + + with_deletes = project_table( + tasks=[example_task_with_delete], + table=Table( + ("namespace", "table"), + metadata=TableMetadataV2( + location=metadata_location, + last_column_id=1, + format_version=2, + current_schema_id=1, + schemas=[table_schema_simple], + partition_specs=[PartitionSpec()], + ), + metadata_location=metadata_location, + io=load_file_io(), + catalog=NoopCatalog("noop"), + ), + row_filter=AlwaysTrue(), + 
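+        # the same delete file is listed twice above; applying it must delete each matching row only once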
projected_schema=table_schema_simple, + ) + + assert ( + str(with_deletes) + == """pyarrow.Table +foo: string +bar: int64 not null +baz: bool +---- +foo: [["a","c"]] +bar: [[1,3]] +baz: [[true,null]]""" + ) + + +def test_pyarrow_wrap_fsspec(example_task: FileScanTask, table_schema_simple: Schema) -> None: + metadata_location = "file://a/b/c.json" + projection = project_table( + [example_task], + Table( + ("namespace", "table"), + metadata=TableMetadataV2( + location=metadata_location, + last_column_id=1, + format_version=2, + current_schema_id=1, + schemas=[table_schema_simple], + partition_specs=[PartitionSpec()], + ), + metadata_location=metadata_location, + io=load_file_io(properties={"py-io-impl": "pyiceberg.io.fsspec.FsspecFileIO"}, location=metadata_location), + catalog=NoopCatalog("NoopCatalog"), + ), + case_sensitive=True, + projected_schema=table_schema_simple, + row_filter=AlwaysTrue(), + ) + + assert ( + str(projection) + == """pyarrow.Table +foo: string +bar: int64 not null +baz: bool +---- +foo: [["a","b","c"]] +bar: [[1,2,3]] +baz: [[true,false,null]]""" + ) + + +@pytest.mark.gcs +def test_new_input_file_gcs(pyarrow_fileio_gcs: PyArrowFileIO) -> None: + """Test creating a new input file from a fsspec file-io""" + filename = str(uuid4()) + + input_file = pyarrow_fileio_gcs.new_input(f"gs://warehouse/{filename}") + + assert isinstance(input_file, PyArrowFile) + assert input_file.location == f"gs://warehouse/{filename}" + + +@pytest.mark.gcs +def test_new_output_file_gcs(pyarrow_fileio_gcs: PyArrowFileIO) -> None: + """Test creating a new output file from an fsspec file-io""" + filename = str(uuid4()) + + output_file = pyarrow_fileio_gcs.new_output(f"gs://warehouse/{filename}") + + assert isinstance(output_file, PyArrowFile) + assert output_file.location == f"gs://warehouse/{filename}" + + +@pytest.mark.gcs +@pytest.mark.skip(reason="Open issue on Arrow: https://github.com/apache/arrow/issues/36993") +def test_write_and_read_file_gcs(pyarrow_fileio_gcs: PyArrowFileIO) -> None: + """Test writing and reading a file using FsspecInputFile and FsspecOutputFile""" + location = f"gs://warehouse/{uuid4()}.txt" + output_file = pyarrow_fileio_gcs.new_output(location=location) + with output_file.create() as f: + assert f.write(b"foo") == 3 + + assert output_file.exists() + + input_file = pyarrow_fileio_gcs.new_input(location=location) + with input_file.open() as f: + assert f.read() == b"foo" + + pyarrow_fileio_gcs.delete(input_file) + + +@pytest.mark.gcs +def test_getting_length_of_file_gcs(pyarrow_fileio_gcs: PyArrowFileIO) -> None: + """Test getting the length of an FsspecInputFile and FsspecOutputFile""" + filename = str(uuid4()) + + output_file = pyarrow_fileio_gcs.new_output(location=f"gs://warehouse/{filename}") + with output_file.create() as f: + f.write(b"foobar") + + assert len(output_file) == 6 + + input_file = pyarrow_fileio_gcs.new_input(location=f"gs://warehouse/{filename}") + assert len(input_file) == 6 + + pyarrow_fileio_gcs.delete(output_file) + + +@pytest.mark.gcs +@pytest.mark.skip(reason="Open issue on Arrow: https://github.com/apache/arrow/issues/36993") +def test_file_tell_gcs(pyarrow_fileio_gcs: PyArrowFileIO) -> None: + location = f"gs://warehouse/{uuid4()}" + + output_file = pyarrow_fileio_gcs.new_output(location=location) + with output_file.create() as write_file: + write_file.write(b"foobar") + + input_file = pyarrow_fileio_gcs.new_input(location=location) + with input_file.open() as f: + f.seek(0) + assert f.tell() == 0 + f.seek(1) + assert f.tell() == 1 + 
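+        # tell() should keep reporting the absolute offset set by the most recent seek()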
f.seek(3) + assert f.tell() == 3 + f.seek(0) + assert f.tell() == 0 + + +@pytest.mark.gcs +@pytest.mark.skip(reason="Open issue on Arrow: https://github.com/apache/arrow/issues/36993") +def test_read_specified_bytes_for_file_gcs(pyarrow_fileio_gcs: PyArrowFileIO) -> None: + location = f"gs://warehouse/{uuid4()}" + + output_file = pyarrow_fileio_gcs.new_output(location=location) + with output_file.create() as write_file: + write_file.write(b"foo") + + input_file = pyarrow_fileio_gcs.new_input(location=location) + with input_file.open() as f: + f.seek(0) + assert b"f" == f.read(1) + f.seek(0) + assert b"fo" == f.read(2) + f.seek(1) + assert b"o" == f.read(1) + f.seek(1) + assert b"oo" == f.read(2) + f.seek(0) + assert b"foo" == f.read(999) # test reading amount larger than entire content length + + pyarrow_fileio_gcs.delete(input_file) + + +@pytest.mark.gcs +@pytest.mark.skip(reason="Open issue on Arrow: https://github.com/apache/arrow/issues/36993") +def test_raise_on_opening_file_not_found_gcs(pyarrow_fileio_gcs: PyArrowFileIO) -> None: + """Test that an fsspec input file raises appropriately when the gcs file is not found""" + + filename = str(uuid4()) + input_file = pyarrow_fileio_gcs.new_input(location=f"gs://warehouse/{filename}") + with pytest.raises(FileNotFoundError) as exc_info: + input_file.open().read() + + assert filename in str(exc_info.value) + + +@pytest.mark.gcs +def test_checking_if_a_file_exists_gcs(pyarrow_fileio_gcs: PyArrowFileIO) -> None: + """Test checking if a file exists""" + non_existent_file = pyarrow_fileio_gcs.new_input(location="gs://warehouse/does-not-exist.txt") + assert not non_existent_file.exists() + + location = f"gs://warehouse/{uuid4()}" + output_file = pyarrow_fileio_gcs.new_output(location=location) + assert not output_file.exists() + with output_file.create() as f: + f.write(b"foo") + + existing_input_file = pyarrow_fileio_gcs.new_input(location=location) + assert existing_input_file.exists() + + existing_output_file = pyarrow_fileio_gcs.new_output(location=location) + assert existing_output_file.exists() + + pyarrow_fileio_gcs.delete(existing_output_file) + + +@pytest.mark.gcs +@pytest.mark.skip(reason="Open issue on Arrow: https://github.com/apache/arrow/issues/36993") +def test_closing_a_file_gcs(pyarrow_fileio_gcs: PyArrowFileIO) -> None: + """Test closing an output file and input file""" + filename = str(uuid4()) + output_file = pyarrow_fileio_gcs.new_output(location=f"gs://warehouse/{filename}") + with output_file.create() as write_file: + write_file.write(b"foo") + assert not write_file.closed # type: ignore + assert write_file.closed # type: ignore + + input_file = pyarrow_fileio_gcs.new_input(location=f"gs://warehouse/{filename}") + with input_file.open() as f: + assert not f.closed # type: ignore + assert f.closed # type: ignore + + pyarrow_fileio_gcs.delete(f"gs://warehouse/{filename}") + + +@pytest.mark.gcs +def test_converting_an_outputfile_to_an_inputfile_gcs(pyarrow_fileio_gcs: PyArrowFileIO) -> None: + """Test converting an output file to an input file""" + filename = str(uuid4()) + output_file = pyarrow_fileio_gcs.new_output(location=f"gs://warehouse/{filename}") + input_file = output_file.to_input_file() + assert input_file.location == output_file.location + + +@pytest.mark.gcs +@pytest.mark.skip(reason="Open issue on Arrow: https://github.com/apache/arrow/issues/36993") +def test_writing_avro_file_gcs(generated_manifest_entry_file: str, pyarrow_fileio_gcs: PyArrowFileIO) -> None: + """Test that bytes match when reading a local avro 
file, writing it using the pyarrow file-io, and then reading it again"""
+    filename = str(uuid4())
+    with PyArrowFileIO().new_input(location=generated_manifest_entry_file).open() as f:
+        b1 = f.read()
+        with pyarrow_fileio_gcs.new_output(location=f"gs://warehouse/{filename}").create() as out_f:
+            out_f.write(b1)
+        with pyarrow_fileio_gcs.new_input(location=f"gs://warehouse/{filename}").open() as in_f:
+            b2 = in_f.read()
+            assert b1 == b2  # Check that the bytes read from the local avro file match the bytes written to gcs
+
+    pyarrow_fileio_gcs.delete(f"gs://warehouse/{filename}")
+
+
+def test_parse_location() -> None:
+    def check_results(location: str, expected_scheme: str, expected_netloc: str, expected_uri: str) -> None:
+        scheme, netloc, uri = PyArrowFileIO.parse_location(location)
+        assert scheme == expected_scheme
+        assert netloc == expected_netloc
+        assert uri == expected_uri
+
+    check_results("hdfs://127.0.0.1:9000/root/foo.txt", "hdfs", "127.0.0.1:9000", "hdfs://127.0.0.1:9000/root/foo.txt")
+    check_results("hdfs://127.0.0.1/root/foo.txt", "hdfs", "127.0.0.1", "hdfs://127.0.0.1/root/foo.txt")
+    check_results("hdfs://clusterA/root/foo.txt", "hdfs", "clusterA", "hdfs://clusterA/root/foo.txt")
+
+    check_results("/root/foo.txt", "file", "", "/root/foo.txt")
+    check_results("/root/tmp/foo.txt", "file", "", "/root/tmp/foo.txt")
diff --git a/tests/io/test_pyarrow_stats.py b/tests/io/test_pyarrow_stats.py
new file mode 100644
index 0000000000..74297fe526
--- /dev/null
+++ b/tests/io/test_pyarrow_stats.py
@@ -0,0 +1,798 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
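+# The cases below share one pattern: write a tiny table to an in-memory
+# Parquet buffer, then check that fill_parquet_file_metadata() derives the
+# expected Iceberg metrics from the Parquet footer. Roughly (using the
+# helpers defined in this module):
+#
+#     file_bytes, metadata, table_metadata = construct_test_table()
+#     schema = get_current_schema(table_metadata)
+#     datafile = DataFile()
+#     fill_parquet_file_metadata(
+#         datafile,
+#         metadata,
+#         len(file_bytes),
+#         compute_statistics_plan(schema, table_metadata.properties),
+#         parquet_path_to_id_mapping(schema),
+#     )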
+# pylint: disable=protected-access,unused-argument,redefined-outer-name + +import math +import tempfile +import uuid +from dataclasses import asdict, dataclass +from datetime import ( + date, + datetime, + time, + timedelta, + timezone, +) +from typing import ( + Any, + Dict, + List, + Optional, + Tuple, + Union, +) + +import pyarrow as pa +import pyarrow.parquet as pq +import pytest + +from pyiceberg.avro import ( + STRUCT_BOOL, + STRUCT_DOUBLE, + STRUCT_FLOAT, + STRUCT_INT32, + STRUCT_INT64, +) +from pyiceberg.io.pyarrow import ( + MetricModeTypes, + MetricsMode, + PyArrowStatisticsCollector, + compute_statistics_plan, + fill_parquet_file_metadata, + match_metrics_mode, + parquet_path_to_id_mapping, + schema_to_pyarrow, +) +from pyiceberg.manifest import DataFile +from pyiceberg.schema import Schema, pre_order_visit +from pyiceberg.table.metadata import ( + TableMetadata, + TableMetadataUtil, + TableMetadataV1, + TableMetadataV2, +) +from pyiceberg.types import ( + BooleanType, + FloatType, + IntegerType, + StringType, +) +from pyiceberg.utils.datetime import date_to_days, datetime_to_micros, time_to_micros + + +@dataclass(frozen=True) +class TestStruct: + x: Optional[int] + y: Optional[float] + + +def construct_test_table() -> Tuple[Any, Any, Union[TableMetadataV1, TableMetadataV2]]: + table_metadata = { + "format-version": 2, + "location": "s3://bucket/test/location", + "last-column-id": 7, + "current-schema-id": 0, + "schemas": [ + { + "type": "struct", + "schema-id": 0, + "fields": [ + {"id": 1, "name": "strings", "required": False, "type": "string"}, + {"id": 2, "name": "floats", "required": False, "type": "float"}, + { + "id": 3, + "name": "list", + "required": False, + "type": {"type": "list", "element-id": 6, "element": "long", "element-required": False}, + }, + { + "id": 4, + "name": "maps", + "required": False, + "type": { + "type": "map", + "key-id": 7, + "key": "long", + "value-id": 8, + "value": "long", + "value-required": False, + }, + }, + { + "id": 5, + "name": "structs", + "required": False, + "type": { + "type": "struct", + "fields": [ + {"id": 9, "name": "x", "required": False, "type": "long"}, + {"id": 10, "name": "y", "required": False, "type": "float", "doc": "comment"}, + ], + }, + }, + ], + }, + ], + "default-spec-id": 0, + "partition-specs": [{"spec-id": 0, "fields": []}], + "properties": {}, + } + + table_metadata = TableMetadataUtil.parse_obj(table_metadata) + arrow_schema = schema_to_pyarrow(table_metadata.schemas[0]) + + _strings = ["zzzzzzzzzzzzzzzzzzzz", "rrrrrrrrrrrrrrrrrrrr", None, "aaaaaaaaaaaaaaaaaaaa"] + + _floats = [3.14, math.nan, 1.69, 100] + + _list = [[1, 2, 3], [4, 5, 6], None, [7, 8, 9]] + + _maps: List[Optional[Dict[int, int]]] = [ + {1: 2, 3: 4}, + None, + {5: 6}, + {}, + ] + + _structs = [ + asdict(TestStruct(1, 0.2)), + asdict(TestStruct(None, -1.34)), + None, + asdict(TestStruct(54, None)), + ] + + table = pa.Table.from_pydict( + { + "strings": _strings, + "floats": _floats, + "list": _list, + "maps": _maps, + "structs": _structs, + }, + schema=arrow_schema, + ) + metadata_collector: List[Any] = [] + + with pa.BufferOutputStream() as f: + with pq.ParquetWriter(f, table.schema, metadata_collector=metadata_collector) as writer: + writer.write_table(table) + + return f.getvalue(), metadata_collector[0], table_metadata + + +def get_current_schema( + table_metadata: TableMetadata, +) -> Schema: + return next(filter(lambda s: s.schema_id == table_metadata.current_schema_id, table_metadata.schemas)) + + +def test_record_count() -> None: + 
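+    # construct_test_table() writes four rows per column, so the resulting DataFile must report record_count == 4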
(file_bytes, metadata, table_metadata) = construct_test_table() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + assert datafile.record_count == 4 + + +def test_file_size() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert datafile.file_size_in_bytes == len(file_bytes) + + +def test_value_counts() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert len(datafile.value_counts) == 7 + assert datafile.value_counts[1] == 4 + assert datafile.value_counts[2] == 4 + assert datafile.value_counts[6] == 10 # 3 lists with 3 items and a None value + assert datafile.value_counts[7] == 5 + assert datafile.value_counts[8] == 5 + assert datafile.value_counts[9] == 4 + assert datafile.value_counts[10] == 4 + + +def test_column_sizes() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert len(datafile.column_sizes) == 7 + # these values are an artifact of how the write_table encodes the columns + assert datafile.column_sizes[1] > 0 + assert datafile.column_sizes[2] > 0 + assert datafile.column_sizes[6] > 0 + assert datafile.column_sizes[7] > 0 + assert datafile.column_sizes[8] > 0 + + +def test_null_and_nan_counts() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert len(datafile.null_value_counts) == 7 + assert datafile.null_value_counts[1] == 1 + assert datafile.null_value_counts[2] == 0 + assert datafile.null_value_counts[6] == 1 + assert datafile.null_value_counts[7] == 2 + assert datafile.null_value_counts[8] == 2 + assert datafile.null_value_counts[9] == 2 + assert datafile.null_value_counts[10] == 2 + + # #arrow does not include this in the statistics + # assert len(datafile.nan_value_counts) == 3 + # assert datafile.nan_value_counts[1] == 0 + # assert datafile.nan_value_counts[2] == 1 + # assert datafile.nan_value_counts[3] == 0 + + +def test_bounds() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert len(datafile.lower_bounds) == 2 + assert datafile.lower_bounds[1].decode() == "aaaaaaaaaaaaaaaa" + assert 
datafile.lower_bounds[2] == STRUCT_FLOAT.pack(1.69) + + assert len(datafile.upper_bounds) == 2 + assert datafile.upper_bounds[1].decode() == "zzzzzzzzzzzzzzz{" + assert datafile.upper_bounds[2] == STRUCT_FLOAT.pack(100) + + +def test_metrics_mode_parsing() -> None: + assert match_metrics_mode("none") == MetricsMode(MetricModeTypes.NONE) + assert match_metrics_mode("nOnE") == MetricsMode(MetricModeTypes.NONE) + assert match_metrics_mode("counts") == MetricsMode(MetricModeTypes.COUNTS) + assert match_metrics_mode("Counts") == MetricsMode(MetricModeTypes.COUNTS) + assert match_metrics_mode("full") == MetricsMode(MetricModeTypes.FULL) + assert match_metrics_mode("FuLl") == MetricsMode(MetricModeTypes.FULL) + assert match_metrics_mode(" FuLl") == MetricsMode(MetricModeTypes.FULL) + + assert match_metrics_mode("truncate(16)") == MetricsMode(MetricModeTypes.TRUNCATE, 16) + assert match_metrics_mode("trUncatE(16)") == MetricsMode(MetricModeTypes.TRUNCATE, 16) + assert match_metrics_mode("trUncatE(7)") == MetricsMode(MetricModeTypes.TRUNCATE, 7) + assert match_metrics_mode("trUncatE(07)") == MetricsMode(MetricModeTypes.TRUNCATE, 7) + + with pytest.raises(ValueError) as exc_info: + match_metrics_mode("trUncatE(-7)") + assert "Malformed truncate: trUncatE(-7)" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + match_metrics_mode("trUncatE(0)") + assert "Truncation length must be larger than 0" in str(exc_info.value) + + +def test_metrics_mode_none() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + table_metadata.properties["write.metadata.metrics.default"] = "none" + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert len(datafile.value_counts) == 0 + assert len(datafile.null_value_counts) == 0 + assert len(datafile.nan_value_counts) == 0 + assert len(datafile.lower_bounds) == 0 + assert len(datafile.upper_bounds) == 0 + + +def test_metrics_mode_counts() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + table_metadata.properties["write.metadata.metrics.default"] = "counts" + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert len(datafile.value_counts) == 7 + assert len(datafile.null_value_counts) == 7 + assert len(datafile.nan_value_counts) == 0 + assert len(datafile.lower_bounds) == 0 + assert len(datafile.upper_bounds) == 0 + + +def test_metrics_mode_full() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + table_metadata.properties["write.metadata.metrics.default"] = "full" + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert len(datafile.value_counts) == 7 + assert len(datafile.null_value_counts) == 7 + assert len(datafile.nan_value_counts) == 0 + + assert len(datafile.lower_bounds) == 2 + assert datafile.lower_bounds[1].decode() == "aaaaaaaaaaaaaaaaaaaa" + assert datafile.lower_bounds[2] == STRUCT_FLOAT.pack(1.69) + + assert len(datafile.upper_bounds) == 2 + assert 
datafile.upper_bounds[1].decode() == "zzzzzzzzzzzzzzzzzzzz" + assert datafile.upper_bounds[2] == STRUCT_FLOAT.pack(100) + + +def test_metrics_mode_non_default_trunc() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + table_metadata.properties["write.metadata.metrics.default"] = "truncate(2)" + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert len(datafile.value_counts) == 7 + assert len(datafile.null_value_counts) == 7 + assert len(datafile.nan_value_counts) == 0 + + assert len(datafile.lower_bounds) == 2 + assert datafile.lower_bounds[1].decode() == "aa" + assert datafile.lower_bounds[2] == STRUCT_FLOAT.pack(1.69) + + assert len(datafile.upper_bounds) == 2 + assert datafile.upper_bounds[1].decode() == "z{" + assert datafile.upper_bounds[2] == STRUCT_FLOAT.pack(100) + + +def test_column_metrics_mode() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + table_metadata.properties["write.metadata.metrics.default"] = "truncate(2)" + table_metadata.properties["write.metadata.metrics.column.strings"] = "none" + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert len(datafile.value_counts) == 6 + assert len(datafile.null_value_counts) == 6 + assert len(datafile.nan_value_counts) == 0 + + assert len(datafile.lower_bounds) == 1 + assert datafile.lower_bounds[2] == STRUCT_FLOAT.pack(1.69) + assert 1 not in datafile.lower_bounds + + assert len(datafile.upper_bounds) == 1 + assert datafile.upper_bounds[2] == STRUCT_FLOAT.pack(100) + assert 1 not in datafile.upper_bounds + + +def construct_test_table_primitive_types() -> Tuple[Any, Any, Union[TableMetadataV1, TableMetadataV2]]: + table_metadata = { + "format-version": 2, + "location": "s3://bucket/test/location", + "last-column-id": 7, + "current-schema-id": 0, + "schemas": [ + { + "type": "struct", + "schema-id": 0, + "fields": [ + {"id": 1, "name": "booleans", "required": False, "type": "boolean"}, + {"id": 2, "name": "ints", "required": False, "type": "int"}, + {"id": 3, "name": "longs", "required": False, "type": "long"}, + {"id": 4, "name": "floats", "required": False, "type": "float"}, + {"id": 5, "name": "doubles", "required": False, "type": "double"}, + {"id": 6, "name": "dates", "required": False, "type": "date"}, + {"id": 7, "name": "times", "required": False, "type": "time"}, + {"id": 8, "name": "timestamps", "required": False, "type": "timestamp"}, + {"id": 9, "name": "timestamptzs", "required": False, "type": "timestamptz"}, + {"id": 10, "name": "strings", "required": False, "type": "string"}, + {"id": 11, "name": "uuids", "required": False, "type": "uuid"}, + {"id": 12, "name": "binaries", "required": False, "type": "binary"}, + ], + }, + ], + "default-spec-id": 0, + "partition-specs": [{"spec-id": 0, "fields": []}], + "properties": {}, + } + + table_metadata = TableMetadataUtil.parse_obj(table_metadata) + arrow_schema = schema_to_pyarrow(table_metadata.schemas[0]) + tz = timezone(timedelta(seconds=19800)) + + booleans = [True, False] + ints = [23, 89] + longs = [54, 2] + floats = [454.1223, 24342.29] + doubles = [8542.12, -43.9] + dates = [date(2022, 1, 2), date(2023, 2, 4)] + 
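+    # two values per column: the bounds assertions later expect exactly the min and max of each pair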
times = [time(17, 30, 34), time(13, 21, 4)] + timestamps = [datetime(2022, 1, 2, 17, 30, 34, 399), datetime(2023, 2, 4, 13, 21, 4, 354)] + timestamptzs = [datetime(2022, 1, 2, 17, 30, 34, 399, tz), datetime(2023, 2, 4, 13, 21, 4, 354, tz)] + strings = ["hello", "world"] + uuids = [uuid.uuid3(uuid.NAMESPACE_DNS, "foo").bytes, uuid.uuid3(uuid.NAMESPACE_DNS, "bar").bytes] + binaries = [b"hello", b"world"] + + table = pa.Table.from_pydict( + { + "booleans": booleans, + "ints": ints, + "longs": longs, + "floats": floats, + "doubles": doubles, + "dates": dates, + "times": times, + "timestamps": timestamps, + "timestamptzs": timestamptzs, + "strings": strings, + "uuids": uuids, + "binaries": binaries, + }, + schema=arrow_schema, + ) + + metadata_collector: List[Any] = [] + + with pa.BufferOutputStream() as f: + with pq.ParquetWriter(f, table.schema, metadata_collector=metadata_collector) as writer: + writer.write_table(table) + + return f.getvalue(), metadata_collector[0], table_metadata + + +def test_metrics_primitive_types() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table_primitive_types() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + table_metadata.properties["write.metadata.metrics.default"] = "truncate(2)" + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert len(datafile.value_counts) == 12 + assert len(datafile.null_value_counts) == 12 + assert len(datafile.nan_value_counts) == 0 + + tz = timezone(timedelta(seconds=19800)) + + assert len(datafile.lower_bounds) == 12 + assert datafile.lower_bounds[1] == STRUCT_BOOL.pack(False) + assert datafile.lower_bounds[2] == STRUCT_INT32.pack(23) + assert datafile.lower_bounds[3] == STRUCT_INT64.pack(2) + assert datafile.lower_bounds[4] == STRUCT_FLOAT.pack(454.1223) + assert datafile.lower_bounds[5] == STRUCT_DOUBLE.pack(-43.9) + assert datafile.lower_bounds[6] == STRUCT_INT32.pack(date_to_days(date(2022, 1, 2))) + assert datafile.lower_bounds[7] == STRUCT_INT64.pack(time_to_micros(time(13, 21, 4))) + assert datafile.lower_bounds[8] == STRUCT_INT64.pack(datetime_to_micros(datetime(2022, 1, 2, 17, 30, 34, 399))) + assert datafile.lower_bounds[9] == STRUCT_INT64.pack(datetime_to_micros(datetime(2022, 1, 2, 17, 30, 34, 399, tz))) + assert datafile.lower_bounds[10] == b"he" + assert datafile.lower_bounds[11] == uuid.uuid3(uuid.NAMESPACE_DNS, "foo").bytes + assert datafile.lower_bounds[12] == b"he" + + assert len(datafile.upper_bounds) == 12 + assert datafile.upper_bounds[1] == STRUCT_BOOL.pack(True) + assert datafile.upper_bounds[2] == STRUCT_INT32.pack(89) + assert datafile.upper_bounds[3] == STRUCT_INT64.pack(54) + assert datafile.upper_bounds[4] == STRUCT_FLOAT.pack(24342.29) + assert datafile.upper_bounds[5] == STRUCT_DOUBLE.pack(8542.12) + assert datafile.upper_bounds[6] == STRUCT_INT32.pack(date_to_days(date(2023, 2, 4))) + assert datafile.upper_bounds[7] == STRUCT_INT64.pack(time_to_micros(time(17, 30, 34))) + assert datafile.upper_bounds[8] == STRUCT_INT64.pack(datetime_to_micros(datetime(2023, 2, 4, 13, 21, 4, 354))) + assert datafile.upper_bounds[9] == STRUCT_INT64.pack(datetime_to_micros(datetime(2023, 2, 4, 13, 21, 4, 354, tz))) + assert datafile.upper_bounds[10] == b"wp" + assert datafile.upper_bounds[11] == uuid.uuid3(uuid.NAMESPACE_DNS, "bar").bytes + assert datafile.upper_bounds[12] == b"wp" + + +def construct_test_table_invalid_upper_bound() -> Tuple[Any, 
Any, Union[TableMetadataV1, TableMetadataV2]]: + table_metadata = { + "format-version": 2, + "location": "s3://bucket/test/location", + "last-column-id": 7, + "current-schema-id": 0, + "schemas": [ + { + "type": "struct", + "schema-id": 0, + "fields": [ + {"id": 1, "name": "valid_upper_binary", "required": False, "type": "binary"}, + {"id": 2, "name": "invalid_upper_binary", "required": False, "type": "binary"}, + {"id": 3, "name": "valid_upper_string", "required": False, "type": "string"}, + {"id": 4, "name": "invalid_upper_string", "required": False, "type": "string"}, + ], + }, + ], + "default-spec-id": 0, + "partition-specs": [{"spec-id": 0, "fields": []}], + "properties": {}, + } + + table_metadata = TableMetadataUtil.parse_obj(table_metadata) + arrow_schema = schema_to_pyarrow(table_metadata.schemas[0]) + + valid_binaries = [b"\x00\x00\x00", b"\xff\xfe\x00"] + invalid_binaries = [b"\x00\x00\x00", b"\xff\xff\x00"] + + valid_strings = ["\x00\x00\x00", "".join([chr(0x10FFFF), chr(0x10FFFE), chr(0x0)])] + invalid_strings = ["\x00\x00\x00", "".join([chr(0x10FFFF), chr(0x10FFFF), chr(0x0)])] + + table = pa.Table.from_pydict( + { + "valid_upper_binary": valid_binaries, + "invalid_upper_binary": invalid_binaries, + "valid_upper_string": valid_strings, + "invalid_upper_string": invalid_strings, + }, + schema=arrow_schema, + ) + + metadata_collector: List[Any] = [] + + with pa.BufferOutputStream() as f: + with pq.ParquetWriter(f, table.schema, metadata_collector=metadata_collector) as writer: + writer.write_table(table) + + return f.getvalue(), metadata_collector[0], table_metadata + + +def test_metrics_invalid_upper_bound() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table_invalid_upper_bound() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + table_metadata.properties["write.metadata.metrics.default"] = "truncate(2)" + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert len(datafile.value_counts) == 4 + assert len(datafile.null_value_counts) == 4 + assert len(datafile.nan_value_counts) == 0 + + assert len(datafile.lower_bounds) == 4 + assert datafile.lower_bounds[1] == b"\x00\x00" + assert datafile.lower_bounds[2] == b"\x00\x00" + assert datafile.lower_bounds[3] == b"\x00\x00" + assert datafile.lower_bounds[4] == b"\x00\x00" + + assert len(datafile.upper_bounds) == 2 + assert datafile.upper_bounds[1] == b"\xff\xff" + assert datafile.upper_bounds[3] == "".join([chr(0x10FFFF), chr(0x10FFFF)]).encode() + + +def test_offsets() -> None: + (file_bytes, metadata, table_metadata) = construct_test_table() + + schema = get_current_schema(table_metadata) + datafile = DataFile() + fill_parquet_file_metadata( + datafile, + metadata, + len(file_bytes), + compute_statistics_plan(schema, table_metadata.properties), + parquet_path_to_id_mapping(schema), + ) + + assert datafile.split_offsets is not None + assert len(datafile.split_offsets) == 1 + assert datafile.split_offsets[0] == 4 + + +def test_write_and_read_stats_schema(table_schema_nested: Schema) -> None: + tbl = pa.Table.from_pydict( + { + "foo": ["a", "b"], + "bar": [1, 2], + "baz": [False, True], + "qux": [["a", "b"], ["c", "d"]], + "quux": [[("a", (("aa", 1), ("ab", 2)))], [("b", (("ba", 3), ("bb", 4)))]], + "location": [[(52.377956, 4.897070), (4.897070, -122.431297)], [(43.618881, -116.215019), (41.881832, -87.623177)]], + "person": [("Fokko", 33), ("Max", 42)], # 
Possible data quality issue + }, + schema=schema_to_pyarrow(table_schema_nested), + ) + stats_columns = pre_order_visit(table_schema_nested, PyArrowStatisticsCollector(table_schema_nested, {})) + + visited_paths = [] + + def file_visitor(written_file: Any) -> None: + visited_paths.append(written_file) + + with tempfile.TemporaryDirectory() as tmpdir: + pq.write_to_dataset(tbl, tmpdir, file_visitor=file_visitor) + + assert visited_paths[0].metadata.num_columns == len(stats_columns) + + +def test_stats_types(table_schema_nested: Schema) -> None: + stats_columns = pre_order_visit(table_schema_nested, PyArrowStatisticsCollector(table_schema_nested, {})) + + # the field-ids should be sorted + assert all(stats_columns[i].field_id <= stats_columns[i + 1].field_id for i in range(len(stats_columns) - 1)) + assert [col.iceberg_type for col in stats_columns] == [ + StringType(), + IntegerType(), + BooleanType(), + StringType(), + StringType(), + StringType(), + IntegerType(), + FloatType(), + FloatType(), + StringType(), + IntegerType(), + ] + + +# This is commented out for now because write_to_dataset drops the partition +# columns making it harder to calculate the mapping from the column index to +# datatype id +# +# def test_dataset() -> pa.Buffer: + +# table_metadata = { +# "format-version": 2, +# "location": "s3://bucket/test/location", +# "last-column-id": 7, +# "current-schema-id": 0, +# "schemas": [ +# { +# "type": "struct", +# "schema-id": 0, +# "fields": [ +# {"id": 1, "name": "ints", "required": False, "type": "long"}, +# {"id": 2, "name": "even", "required": False, "type": "boolean"}, +# ], +# }, +# ], +# "default-spec-id": 0, +# "partition-specs": [{"spec-id": 0, "fields": []}], +# "properties": {}, +# } + +# table_metadata = TableMetadataUtil.parse_obj(table_metadata) +# schema = schema_to_pyarrow(table_metadata.schemas[0]) + +# _ints = [0, 2, 4, 8, 1, 3, 5, 7] +# parity = [True, True, True, True, False, False, False, False] + +# table = pa.Table.from_pydict({"ints": _ints, "even": parity}, schema=schema) + +# visited_paths = [] + +# def file_visitor(written_file: Any) -> None: +# visited_paths.append(written_file) + +# with TemporaryDirectory() as tmpdir: +# pq.write_to_dataset(table, tmpdir, partition_cols=["even"], file_visitor=file_visitor) + +# even = None +# odd = None + +# assert len(visited_paths) == 2 + +# for written_file in visited_paths: +# df = DataFile() + +# fill_parquet_file_metadata(df, written_file.metadata, written_file.size, table_metadata) + +# if "even=true" in written_file.path: +# even = df + +# if "even=false" in written_file.path: +# odd = df + +# assert even is not None +# assert odd is not None + +# assert len(even.value_counts) == 1 +# assert even.value_counts[1] == 4 +# assert len(even.lower_bounds) == 1 +# assert even.lower_bounds[1] == STRUCT_INT64.pack(0) +# assert len(even.upper_bounds) == 1 +# assert even.upper_bounds[1] == STRUCT_INT64.pack(8) + +# assert len(odd.value_counts) == 1 +# assert odd.value_counts[1] == 4 +# assert len(odd.lower_bounds) == 1 +# assert odd.lower_bounds[1] == STRUCT_INT64.pack(1) +# assert len(odd.upper_bounds) == 1 +# assert odd.upper_bounds[1] == STRUCT_INT64.pack(7) diff --git a/tests/io/test_pyarrow_visitor.py b/tests/io/test_pyarrow_visitor.py new file mode 100644 index 0000000000..5194d8660e --- /dev/null +++ b/tests/io/test_pyarrow_visitor.py @@ -0,0 +1,271 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=protected-access,unused-argument,redefined-outer-name +import re + +import pyarrow as pa +import pytest + +from pyiceberg.io.pyarrow import ( + _ConvertToArrowSchema, + _ConvertToIceberg, + pyarrow_to_schema, + schema_to_pyarrow, + visit_pyarrow, +) +from pyiceberg.schema import Schema, visit +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + StringType, + StructType, + TimestampType, + TimestamptzType, + TimeType, +) + + +def test_pyarrow_binary_to_iceberg() -> None: + length = 23 + pyarrow_type = pa.binary(length) + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + assert converted_iceberg_type == FixedType(length) + assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_decimal128_to_iceberg() -> None: + precision = 26 + scale = 20 + pyarrow_type = pa.decimal128(precision, scale) + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + assert converted_iceberg_type == DecimalType(precision, scale) + assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_decimal256_to_iceberg() -> None: + precision = 26 + scale = 20 + pyarrow_type = pa.decimal256(precision, scale) + with pytest.raises(TypeError, match=re.escape("Unsupported type: decimal256(26, 20)")): + visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + + +def test_pyarrow_boolean_to_iceberg() -> None: + pyarrow_type = pa.bool_() + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + assert converted_iceberg_type == BooleanType() + assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_int32_to_iceberg() -> None: + pyarrow_type = pa.int32() + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + assert converted_iceberg_type == IntegerType() + assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_int64_to_iceberg() -> None: + pyarrow_type = pa.int64() + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + assert converted_iceberg_type == LongType() + assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_float32_to_iceberg() -> None: + pyarrow_type = pa.float32() + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + assert converted_iceberg_type == FloatType() + assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_float64_to_iceberg() -> None: + pyarrow_type = pa.float64() + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + assert converted_iceberg_type == DoubleType() + 
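+    # round trip: converting the Iceberg type back to Arrow must return the original pyarrow type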
assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_date32_to_iceberg() -> None: + pyarrow_type = pa.date32() + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + assert converted_iceberg_type == DateType() + assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_date64_to_iceberg() -> None: + pyarrow_type = pa.date64() + with pytest.raises(TypeError, match=re.escape("Unsupported type: date64")): + visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + + +def test_pyarrow_time32_to_iceberg() -> None: + pyarrow_type = pa.time32("ms") + with pytest.raises(TypeError, match=re.escape("Unsupported type: time32[ms]")): + visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + pyarrow_type = pa.time32("s") + with pytest.raises(TypeError, match=re.escape("Unsupported type: time32[s]")): + visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + + +def test_pyarrow_time64_us_to_iceberg() -> None: + pyarrow_type = pa.time64("us") + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + assert converted_iceberg_type == TimeType() + assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_time64_ns_to_iceberg() -> None: + pyarrow_type = pa.time64("ns") + with pytest.raises(TypeError, match=re.escape("Unsupported type: time64[ns]")): + visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + + +def test_pyarrow_timestamp_to_iceberg() -> None: + pyarrow_type = pa.timestamp(unit="us") + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + assert converted_iceberg_type == TimestampType() + assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_timestamp_invalid_units() -> None: + pyarrow_type = pa.timestamp(unit="ms") + with pytest.raises(TypeError, match=re.escape("Unsupported type: timestamp[ms]")): + visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + pyarrow_type = pa.timestamp(unit="s") + with pytest.raises(TypeError, match=re.escape("Unsupported type: timestamp[s]")): + visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + pyarrow_type = pa.timestamp(unit="ns") + with pytest.raises(TypeError, match=re.escape("Unsupported type: timestamp[ns]")): + visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + + +def test_pyarrow_timestamp_tz_to_iceberg() -> None: + pyarrow_type = pa.timestamp(unit="us", tz="UTC") + pyarrow_type_zero_offset = pa.timestamp(unit="us", tz="+00:00") + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + converted_iceberg_type_zero_offset = visit_pyarrow(pyarrow_type_zero_offset, _ConvertToIceberg()) + assert converted_iceberg_type == TimestamptzType() + assert converted_iceberg_type_zero_offset == TimestamptzType() + assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + assert visit(converted_iceberg_type_zero_offset, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_timestamp_tz_invalid_units() -> None: + pyarrow_type = pa.timestamp(unit="ms", tz="UTC") + with pytest.raises(TypeError, match=re.escape("Unsupported type: timestamp[ms, tz=UTC]")): + visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + pyarrow_type = pa.timestamp(unit="s", tz="UTC") + with pytest.raises(TypeError, match=re.escape("Unsupported type: timestamp[s, tz=UTC]")): + visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + pyarrow_type = pa.timestamp(unit="ns", tz="UTC") + with pytest.raises(TypeError, match=re.escape("Unsupported type: 
timestamp[ns, tz=UTC]")): + visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + + +def test_pyarrow_timestamp_tz_invalid_tz() -> None: + pyarrow_type = pa.timestamp(unit="us", tz="US/Pacific") + with pytest.raises(TypeError, match=re.escape("Unsupported type: timestamp[us, tz=US/Pacific]")): + visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + + +def test_pyarrow_string_to_iceberg() -> None: + pyarrow_type = pa.string() + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + assert converted_iceberg_type == StringType() + assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_variable_binary_to_iceberg() -> None: + pyarrow_type = pa.binary() + converted_iceberg_type = visit_pyarrow(pyarrow_type, _ConvertToIceberg()) + assert converted_iceberg_type == BinaryType() + assert visit(converted_iceberg_type, _ConvertToArrowSchema()) == pyarrow_type + + +def test_pyarrow_struct_to_iceberg() -> None: + pyarrow_struct = pa.struct( + [ + pa.field("foo", pa.string(), nullable=True, metadata={"field_id": "1", "doc": "foo doc"}), + pa.field("bar", pa.int32(), nullable=False, metadata={"field_id": "2"}), + pa.field("baz", pa.bool_(), nullable=True, metadata={"field_id": "3"}), + ] + ) + expected = StructType( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False, doc="foo doc"), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + ) + assert visit_pyarrow(pyarrow_struct, _ConvertToIceberg()) == expected + + +def test_pyarrow_list_to_iceberg() -> None: + pyarrow_list = pa.list_(pa.field("element", pa.int32(), nullable=False, metadata={"field_id": "1"})) + expected = ListType( + element_id=1, + element_type=IntegerType(), + element_required=True, + ) + assert visit_pyarrow(pyarrow_list, _ConvertToIceberg()) == expected + + +def test_pyarrow_map_to_iceberg() -> None: + pyarrow_map = pa.map_( + pa.field("key", pa.int32(), nullable=False, metadata={"field_id": "1"}), + pa.field("value", pa.string(), nullable=False, metadata={"field_id": "2"}), + ) + expected = MapType( + key_id=1, + key_type=IntegerType(), + value_id=2, + value_type=StringType(), + value_required=True, + ) + assert visit_pyarrow(pyarrow_map, _ConvertToIceberg()) == expected + + +def test_round_schema_conversion_simple(table_schema_simple: Schema) -> None: + actual = str(pyarrow_to_schema(schema_to_pyarrow(table_schema_simple))) + expected = """table { + 1: foo: optional string + 2: bar: required int + 3: baz: optional boolean +}""" + assert actual == expected + + +def test_round_schema_conversion_nested(table_schema_nested: Schema) -> None: + actual = str(pyarrow_to_schema(schema_to_pyarrow(table_schema_nested))) + expected = """table { + 1: foo: optional string + 2: bar: required int + 3: baz: optional boolean + 4: qux: required list + 6: quux: required map> + 11: location: required list> + 15: person: optional struct<16: name: optional string, 17: age: required int> +}""" + assert actual == expected diff --git a/tests/table/test_init.py b/tests/table/test_init.py new file mode 100644 index 0000000000..8fd5e2bcdb --- /dev/null +++ b/tests/table/test_init.py @@ -0,0 +1,508 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint:disable=redefined-outer-name +from typing import Dict + +import pytest +from sortedcontainers import SortedList + +from pyiceberg.expressions import ( + AlwaysTrue, + And, + EqualTo, + In, +) +from pyiceberg.io import PY_IO_IMPL +from pyiceberg.manifest import ( + DataFile, + DataFileContent, + FileFormat, + ManifestEntry, + ManifestEntryStatus, +) +from pyiceberg.partitioning import PartitionField, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.table import ( + SetPropertiesUpdate, + StaticTable, + Table, + UpdateSchema, + _match_deletes_to_datafile, +) +from pyiceberg.table.metadata import INITIAL_SEQUENCE_NUMBER +from pyiceberg.table.snapshots import ( + Operation, + Snapshot, + SnapshotLogEntry, + Summary, +) +from pyiceberg.table.sorting import ( + NullOrder, + SortDirection, + SortField, + SortOrder, +) +from pyiceberg.transforms import BucketTransform, IdentityTransform +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DoubleType, + FloatType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + PrimitiveType, + StringType, + StructType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) + + +def test_schema(table: Table) -> None: + assert table.schema() == Schema( + NestedField(field_id=1, name="x", field_type=LongType(), required=True), + NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"), + NestedField(field_id=3, name="z", field_type=LongType(), required=True), + schema_id=1, + identifier_field_ids=[1, 2], + ) + + +def test_schemas(table: Table) -> None: + assert table.schemas() == { + 0: Schema( + NestedField(field_id=1, name="x", field_type=LongType(), required=True), + schema_id=0, + identifier_field_ids=[], + ), + 1: Schema( + NestedField(field_id=1, name="x", field_type=LongType(), required=True), + NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"), + NestedField(field_id=3, name="z", field_type=LongType(), required=True), + schema_id=1, + identifier_field_ids=[1, 2], + ), + } + + +def test_spec(table: Table) -> None: + assert table.spec() == PartitionSpec( + PartitionField(source_id=1, field_id=1000, transform=IdentityTransform(), name="x"), spec_id=0 + ) + + +def test_specs(table: Table) -> None: + assert table.specs() == { + 0: PartitionSpec(PartitionField(source_id=1, field_id=1000, transform=IdentityTransform(), name="x"), spec_id=0) + } + + +def test_sort_order(table: Table) -> None: + assert table.sort_order() == SortOrder( + SortField(source_id=2, transform=IdentityTransform(), direction=SortDirection.ASC, null_order=NullOrder.NULLS_FIRST), + SortField( + source_id=3, + transform=BucketTransform(num_buckets=4), + direction=SortDirection.DESC, + null_order=NullOrder.NULLS_LAST, + ), + order_id=3, + ) + + +def test_sort_orders(table: Table) -> None: + assert table.sort_orders() == { + 3: SortOrder( + SortField(source_id=2, 
transform=IdentityTransform(), direction=SortDirection.ASC, null_order=NullOrder.NULLS_FIRST), + SortField( + source_id=3, + transform=BucketTransform(num_buckets=4), + direction=SortDirection.DESC, + null_order=NullOrder.NULLS_LAST, + ), + order_id=3, + ) + } + + +def test_location(table: Table) -> None: + assert table.location() == "s3://bucket/test/location" + + +def test_current_snapshot(table: Table) -> None: + assert table.current_snapshot() == Snapshot( + snapshot_id=3055729675574597004, + parent_snapshot_id=3051729675574597004, + sequence_number=1, + timestamp_ms=1555100955770, + manifest_list="s3://a/b/2.avro", + summary=Summary(operation=Operation.APPEND), + schema_id=1, + ) + + +def test_snapshot_by_id(table: Table) -> None: + assert table.snapshot_by_id(3055729675574597004) == Snapshot( + snapshot_id=3055729675574597004, + parent_snapshot_id=3051729675574597004, + sequence_number=1, + timestamp_ms=1555100955770, + manifest_list="s3://a/b/2.avro", + summary=Summary(operation=Operation.APPEND), + schema_id=1, + ) + + +def test_snapshot_by_id_does_not_exist(table: Table) -> None: + assert table.snapshot_by_id(-1) is None + + +def test_snapshot_by_name(table: Table) -> None: + assert table.snapshot_by_name("test") == Snapshot( + snapshot_id=3051729675574597004, + parent_snapshot_id=None, + sequence_number=0, + timestamp_ms=1515100955770, + manifest_list="s3://a/b/1.avro", + summary=Summary(operation=Operation.APPEND), + schema_id=None, + ) + + +def test_snapshot_by_name_does_not_exist(table: Table) -> None: + assert table.snapshot_by_name("doesnotexist") is None + + +def test_repr(table: Table) -> None: + expected = """table( + 1: x: required long, + 2: y: required long (comment), + 3: z: required long +), +partition by: [x], +sort order: [2 ASC NULLS FIRST, bucket[4](3) DESC NULLS LAST], +snapshot: Operation.APPEND: id=3055729675574597004, parent_id=3051729675574597004, schema_id=1""" + assert repr(table) == expected + + +def test_history(table: Table) -> None: + assert table.history() == [ + SnapshotLogEntry(snapshot_id=3051729675574597004, timestamp_ms=1515100955770), + SnapshotLogEntry(snapshot_id=3055729675574597004, timestamp_ms=1555100955770), + ] + + +def test_table_scan_select(table: Table) -> None: + scan = table.scan() + assert scan.selected_fields == ("*",) + assert scan.select("a", "b").selected_fields == ("a", "b") + assert scan.select("a", "c").select("a").selected_fields == ("a",) + + +def test_table_scan_row_filter(table: Table) -> None: + scan = table.scan() + assert scan.row_filter == AlwaysTrue() + assert scan.filter(EqualTo("x", 10)).row_filter == EqualTo("x", 10) + assert scan.filter(EqualTo("x", 10)).filter(In("y", (10, 11))).row_filter == And(EqualTo("x", 10), In("y", (10, 11))) + + +def test_table_scan_ref(table: Table) -> None: + scan = table.scan() + assert scan.use_ref("test").snapshot_id == 3051729675574597004 + + +def test_table_scan_ref_does_not_exist(table: Table) -> None: + scan = table.scan() + + with pytest.raises(ValueError) as exc_info: + _ = scan.use_ref("boom") + + assert "Cannot scan unknown ref=boom" in str(exc_info.value) + + +def test_table_scan_projection_full_schema(table: Table) -> None: + scan = table.scan() + assert scan.select("x", "y", "z").projection() == Schema( + NestedField(field_id=1, name="x", field_type=LongType(), required=True), + NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"), + NestedField(field_id=3, name="z", field_type=LongType(), required=True), + schema_id=1, +
identifier_field_ids=[1, 2], + ) + + +def test_table_scan_projection_single_column(table: Table) -> None: + scan = table.scan() + assert scan.select("y").projection() == Schema( + NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"), + schema_id=1, + identifier_field_ids=[2], + ) + + +def test_table_scan_projection_single_column_case_sensitive(table: Table) -> None: + scan = table.scan() + assert scan.with_case_sensitive(False).select("Y").projection() == Schema( + NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"), + schema_id=1, + identifier_field_ids=[2], + ) + + +def test_table_scan_projection_unknown_column(table: Table) -> None: + scan = table.scan() + + with pytest.raises(ValueError) as exc_info: + _ = scan.select("a").projection() + + assert "Could not find column: 'a'" in str(exc_info.value) + + +def test_static_table_same_as_table(table: Table, metadata_location: str) -> None: + static_table = StaticTable.from_metadata(metadata_location) + assert isinstance(static_table, Table) + assert static_table.metadata == table.metadata + + +def test_static_table_gz_same_as_table(table: Table, metadata_location_gz: str) -> None: + static_table = StaticTable.from_metadata(metadata_location_gz) + assert isinstance(static_table, Table) + assert static_table.metadata == table.metadata + + +def test_static_table_io_does_not_exist(metadata_location: str) -> None: + with pytest.raises(ValueError): + StaticTable.from_metadata(metadata_location, {PY_IO_IMPL: "pyiceberg.does.not.exist.FileIO"}) + + +def test_match_deletes_to_datafile() -> None: + data_entry = ManifestEntry( + status=ManifestEntryStatus.ADDED, + sequence_number=1, + data_file=DataFile( + content=DataFileContent.DATA, + file_path="s3://bucket/0000.parquet", + file_format=FileFormat.PARQUET, + partition={}, + record_count=3, + file_size_in_bytes=3, + ), + ) + delete_entry_1 = ManifestEntry( + status=ManifestEntryStatus.ADDED, + sequence_number=0, # Older than the data + data_file=DataFile( + content=DataFileContent.POSITION_DELETES, + file_path="s3://bucket/0001-delete.parquet", + file_format=FileFormat.PARQUET, + partition={}, + record_count=3, + file_size_in_bytes=3, + ), + ) + delete_entry_2 = ManifestEntry( + status=ManifestEntryStatus.ADDED, + sequence_number=3, + data_file=DataFile( + content=DataFileContent.POSITION_DELETES, + file_path="s3://bucket/0002-delete.parquet", + file_format=FileFormat.PARQUET, + partition={}, + record_count=3, + file_size_in_bytes=3, + # We don't really care about the stats here + value_counts={}, + null_value_counts={}, + nan_value_counts={}, + lower_bounds={}, + upper_bounds={}, + ), + ) + assert _match_deletes_to_datafile( + data_entry, + SortedList(iterable=[delete_entry_1, delete_entry_2], key=lambda entry: entry.sequence_number or INITIAL_SEQUENCE_NUMBER), + ) == { + delete_entry_2.data_file, + } + + +def test_match_deletes_to_datafile_duplicate_number() -> None: + data_entry = ManifestEntry( + status=ManifestEntryStatus.ADDED, + sequence_number=1, + data_file=DataFile( + content=DataFileContent.DATA, + file_path="s3://bucket/0000.parquet", + file_format=FileFormat.PARQUET, + partition={}, + record_count=3, + file_size_in_bytes=3, + ), + ) + delete_entry_1 = ManifestEntry( + status=ManifestEntryStatus.ADDED, + sequence_number=3, + data_file=DataFile( + content=DataFileContent.POSITION_DELETES, + file_path="s3://bucket/0001-delete.parquet", + file_format=FileFormat.PARQUET, + partition={}, + record_count=3, +
file_size_in_bytes=3, + # We don't really care about the stats here + value_counts={}, + null_value_counts={}, + nan_value_counts={}, + lower_bounds={}, + upper_bounds={}, + ), + ) + delete_entry_2 = ManifestEntry( + status=ManifestEntryStatus.ADDED, + sequence_number=3, + data_file=DataFile( + content=DataFileContent.POSITION_DELETES, + file_path="s3://bucket/0002-delete.parquet", + file_format=FileFormat.PARQUET, + partition={}, + record_count=3, + file_size_in_bytes=3, + # We don't really care about the stats here + value_counts={}, + null_value_counts={}, + nan_value_counts={}, + lower_bounds={}, + upper_bounds={}, + ), + ) + assert _match_deletes_to_datafile( + data_entry, + SortedList(iterable=[delete_entry_1, delete_entry_2], key=lambda entry: entry.sequence_number or INITIAL_SEQUENCE_NUMBER), + ) == { + delete_entry_1.data_file, + delete_entry_2.data_file, + } + + +def test_serialize_set_properties_updates() -> None: + assert SetPropertiesUpdate(updates={"abc": "🤪"}).model_dump_json() == """{"action":"set-properties","updates":{"abc":"🤪"}}""" + + +def test_add_column(table: Table) -> None: + update = UpdateSchema(table) + update.add_column(path="b", field_type=IntegerType()) + apply_schema: Schema = update._apply() # pylint: disable=W0212 + assert len(apply_schema.fields) == 4 + + assert apply_schema == Schema( + NestedField(field_id=1, name="x", field_type=LongType(), required=True), + NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"), + NestedField(field_id=3, name="z", field_type=LongType(), required=True), + NestedField(field_id=4, name="b", field_type=IntegerType(), required=False), + identifier_field_ids=[1, 2], + ) + assert apply_schema.schema_id == 2 + assert apply_schema.highest_field_id == 4 + + +def test_add_primitive_type_column(table: Table) -> None: + primitive_type: Dict[str, PrimitiveType] = { + "boolean": BooleanType(), + "int": IntegerType(), + "long": LongType(), + "float": FloatType(), + "double": DoubleType(), + "date": DateType(), + "time": TimeType(), + "timestamp": TimestampType(), + "timestamptz": TimestamptzType(), + "string": StringType(), + "uuid": UUIDType(), + "binary": BinaryType(), + } + + for name, type_ in primitive_type.items(): + field_name = f"new_column_{name}" + update = UpdateSchema(table) + update.add_column(path=field_name, field_type=type_, doc=f"new_column_{name}") + new_schema = update._apply() # pylint: disable=W0212 + + field: NestedField = new_schema.find_field(field_name) + assert field.field_type == type_ + assert field.doc == f"new_column_{name}" + + +def test_add_nested_type_column(table: Table) -> None: + # add struct type column + field_name = "new_column_struct" + update = UpdateSchema(table) + struct_ = StructType( + NestedField(1, "lat", DoubleType()), + NestedField(2, "long", DoubleType()), + ) + update.add_column(path=field_name, field_type=struct_) + schema_ = update._apply() # pylint: disable=W0212 + field: NestedField = schema_.find_field(field_name) + assert field.field_type == StructType( + NestedField(5, "lat", DoubleType()), + NestedField(6, "long", DoubleType()), + ) + assert schema_.highest_field_id == 6 + + +def test_add_nested_map_type_column(table: Table) -> None: + # add map type column + field_name = "new_column_map" + update = UpdateSchema(table) + map_ = MapType(1, StringType(), 2, IntegerType(), False) + update.add_column(path=field_name, field_type=map_) + new_schema = update._apply() # pylint: disable=W0212 + field: NestedField = new_schema.find_field(field_name) +
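+    # add_column assigns field ID 4 to the map column itself, so the requested key/value IDs (1, 2) are reassigned to the next free IDs, 5 and 6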
assert field.field_type == MapType(5, StringType(), 6, IntegerType(), False) + assert new_schema.highest_field_id == 6 + + +def test_add_nested_list_type_column(table: Table) -> None: + # add list type column + field_name = "new_column_list" + update = UpdateSchema(table) + list_ = ListType( + element_id=101, + element_type=StructType( + NestedField(102, "lat", DoubleType()), + NestedField(103, "long", DoubleType()), + ), + element_required=False, + ) + update.add_column(path=field_name, field_type=list_) + new_schema = update._apply() # pylint: disable=W0212 + field: NestedField = new_schema.find_field(field_name) + assert field.field_type == ListType( + element_id=5, + element_type=StructType( + NestedField(6, "lat", DoubleType()), + NestedField(7, "long", DoubleType()), + ), + element_required=False, + ) + assert new_schema.highest_field_id == 7 diff --git a/tests/table/test_metadata.py b/tests/table/test_metadata.py new file mode 100644 index 0000000000..2273843645 --- /dev/null +++ b/tests/table/test_metadata.py @@ -0,0 +1,713 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint: disable=redefined-outer-name + +import io +import json +from copy import copy +from typing import Any, Dict +from unittest.mock import MagicMock, patch +from uuid import UUID + +import pytest + +from pyiceberg.exceptions import ValidationError +from pyiceberg.partitioning import PartitionField, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.serializers import FromByteStream +from pyiceberg.table import SortOrder +from pyiceberg.table.metadata import ( + TableMetadataUtil, + TableMetadataV1, + TableMetadataV2, + new_table_metadata, +) +from pyiceberg.table.refs import SnapshotRef, SnapshotRefType +from pyiceberg.table.sorting import NullOrder, SortDirection, SortField +from pyiceberg.transforms import IdentityTransform +from pyiceberg.types import ( + BooleanType, + FloatType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + StringType, + StructType, +) + +EXAMPLE_TABLE_METADATA_V1 = { + "format-version": 1, + "table-uuid": "d20125c8-7284-442c-9aea-15fee620737c", + "location": "s3://bucket/test/location", + "last-updated-ms": 1602638573874, + "last-column-id": 3, + "schema": { + "type": "struct", + "fields": [ + {"id": 1, "name": "x", "required": True, "type": "long"}, + {"id": 2, "name": "y", "required": True, "type": "long", "doc": "comment"}, + {"id": 3, "name": "z", "required": True, "type": "long"}, + ], + }, + "partition-spec": [{"name": "x", "transform": "identity", "source-id": 1, "field-id": 1000}], + "properties": {}, + "current-snapshot-id": -1, + "snapshots": [{"snapshot-id": 1925, "timestamp-ms": 1602638573822}], +} + + +@pytest.fixture(scope="session") +def example_table_metadata_v1() -> Dict[str, Any]: + return EXAMPLE_TABLE_METADATA_V1 + + +def test_from_dict_v1(example_table_metadata_v1: Dict[str, Any]) -> None: + """Test initialization of a TableMetadata instance from a dictionary""" + TableMetadataUtil.parse_obj(example_table_metadata_v1) + + +def test_from_dict_v1_parse_raw(example_table_metadata_v1: Dict[str, Any]) -> None: + """Test initialization of a TableMetadata instance from a str""" + TableMetadataUtil.parse_raw(json.dumps(example_table_metadata_v1)) + + +def test_from_dict_v2(example_table_metadata_v2: Dict[str, Any]) -> None: + """Test initialization of a TableMetadata instance from a dictionary""" + TableMetadataUtil.parse_obj(example_table_metadata_v2) + + +def test_from_dict_v2_parse_raw(example_table_metadata_v2: Dict[str, Any]) -> None: + """Test initialization of a TableMetadata instance from a str""" + TableMetadataUtil.parse_raw(json.dumps(example_table_metadata_v2)) + + +def test_from_byte_stream(example_table_metadata_v2: Dict[str, Any]) -> None: + """Test generating a TableMetadata instance from a file-like byte stream""" + data = bytes(json.dumps(example_table_metadata_v2), encoding="utf-8") + byte_stream = io.BytesIO(data) + FromByteStream.table_metadata(byte_stream=byte_stream) + + +def test_v2_metadata_parsing(example_table_metadata_v2: Dict[str, Any]) -> None: + """Test retrieving values from a TableMetadata instance of version 2""" + table_metadata = TableMetadataUtil.parse_obj(example_table_metadata_v2) + + assert table_metadata.format_version == 2 + assert table_metadata.table_uuid == UUID("9c12d441-03fe-4693-9a96-a0705ddf69c1") + assert table_metadata.location == "s3://bucket/test/location" + assert table_metadata.last_sequence_number == 34 + assert table_metadata.last_updated_ms == 1602638573590 + assert table_metadata.last_column_id == 3 + assert table_metadata.schemas[0].schema_id == 0 + 
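+    # The example v2 metadata carries two schemas (ids 0 and 1), with schema 1 marked as current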
assert table_metadata.current_schema_id == 1 + assert table_metadata.partition_specs[0].spec_id == 0 + assert table_metadata.default_spec_id == 0 + assert table_metadata.last_partition_id == 1000 + assert table_metadata.properties["read.split.target.size"] == "134217728" + assert table_metadata.current_snapshot_id == 3055729675574597004 + assert table_metadata.snapshots[0].snapshot_id == 3051729675574597004 + assert table_metadata.snapshot_log[0].timestamp_ms == 1515100955770 + assert table_metadata.sort_orders[0].order_id == 3 + assert table_metadata.default_sort_order_id == 3 + + +def test_v1_metadata_parsing_directly(example_table_metadata_v1: Dict[str, Any]) -> None: + """Test retrieving values from a TableMetadata instance of version 1""" + table_metadata = TableMetadataV1(**example_table_metadata_v1) + + assert isinstance(table_metadata, TableMetadataV1) + + # Parsing TableMetadataV1 directly keeps the metadata at format version 1; use to_v2() to upgrade + assert table_metadata.format_version == 1 + assert table_metadata.table_uuid == UUID("d20125c8-7284-442c-9aea-15fee620737c") + assert table_metadata.location == "s3://bucket/test/location" + assert table_metadata.last_updated_ms == 1602638573874 + assert table_metadata.last_column_id == 3 + assert table_metadata.schemas == [ + Schema( + NestedField(field_id=1, name="x", field_type=LongType(), required=True), + NestedField(field_id=2, name="y", field_type=LongType(), required=True, doc="comment"), + NestedField(field_id=3, name="z", field_type=LongType(), required=True), + schema_id=0, + identifier_field_ids=[], + ) + ] + assert table_metadata.schemas[0].schema_id == 0 + assert table_metadata.current_schema_id == 0 + assert table_metadata.partition_specs == [ + PartitionSpec(PartitionField(source_id=1, field_id=1000, transform=IdentityTransform(), name="x")) + ] + assert table_metadata.default_spec_id == 0 + assert table_metadata.last_partition_id == 1000 + assert table_metadata.current_snapshot_id is None + assert table_metadata.default_sort_order_id == 0 + + +def test_parsing_correct_types(example_table_metadata_v2: Dict[str, Any]) -> None: + table_metadata = TableMetadataV2(**example_table_metadata_v2) + assert isinstance(table_metadata.schemas[0], Schema) + assert isinstance(table_metadata.schemas[0].fields[0], NestedField) + assert isinstance(table_metadata.schemas[0].fields[0].field_type, LongType) + + +def test_updating_metadata(example_table_metadata_v2: Dict[str, Any]) -> None: + """Test creating a new TableMetadata instance that's an updated version of + an existing TableMetadata instance""" + table_metadata = TableMetadataV2(**example_table_metadata_v2) + + new_schema = { + "type": "struct", + "schema-id": 1, + "fields": [ + {"id": 1, "name": "foo", "required": True, "type": "string"}, + {"id": 2, "name": "bar", "required": True, "type": "string"}, + {"id": 3, "name": "baz", "required": True, "type": "string"}, + ], + } + + mutable_table_metadata = table_metadata.model_dump() + mutable_table_metadata["schemas"].append(new_schema) + mutable_table_metadata["current-schema-id"] = 1 + + table_metadata = TableMetadataV2(**mutable_table_metadata) + + assert table_metadata.current_schema_id == 1 + assert table_metadata.schemas[-1] == Schema(**new_schema) + + +def test_serialize_v1(example_table_metadata_v1: Dict[str, Any]) -> None: + table_metadata = TableMetadataV1(**example_table_metadata_v1) + table_metadata_json = table_metadata.model_dump_json() + expected = 
"""{"location":"s3://bucket/test/location","table-uuid":"d20125c8-7284-442c-9aea-15fee620737c","last-updated-ms":1602638573874,"last-column-id":3,"schemas":[{"type":"struct","fields":[{"id":1,"name":"x","type":"long","required":true},{"id":2,"name":"y","type":"long","required":true,"doc":"comment"},{"id":3,"name":"z","type":"long","required":true}],"schema-id":0,"identifier-field-ids":[]}],"current-schema-id":0,"partition-specs":[{"spec-id":0,"fields":[{"source-id":1,"field-id":1000,"transform":"identity","name":"x"}]}],"default-spec-id":0,"last-partition-id":1000,"properties":{},"snapshots":[{"snapshot-id":1925,"timestamp-ms":1602638573822}],"snapshot-log":[],"metadata-log":[],"sort-orders":[{"order-id":0,"fields":[]}],"default-sort-order-id":0,"refs":{},"format-version":1,"schema":{"type":"struct","fields":[{"id":1,"name":"x","type":"long","required":true},{"id":2,"name":"y","type":"long","required":true,"doc":"comment"},{"id":3,"name":"z","type":"long","required":true}],"schema-id":0,"identifier-field-ids":[]},"partition-spec":[{"name":"x","transform":"identity","source-id":1,"field-id":1000}]}""" + assert table_metadata_json == expected + + +def test_serialize_v2(example_table_metadata_v2: Dict[str, Any]) -> None: + table_metadata = TableMetadataV2(**example_table_metadata_v2).model_dump_json() + expected = """{"location":"s3://bucket/test/location","table-uuid":"9c12d441-03fe-4693-9a96-a0705ddf69c1","last-updated-ms":1602638573590,"last-column-id":3,"schemas":[{"type":"struct","fields":[{"id":1,"name":"x","type":"long","required":true}],"schema-id":0,"identifier-field-ids":[]},{"type":"struct","fields":[{"id":1,"name":"x","type":"long","required":true},{"id":2,"name":"y","type":"long","required":true,"doc":"comment"},{"id":3,"name":"z","type":"long","required":true}],"schema-id":1,"identifier-field-ids":[1,2]}],"current-schema-id":1,"partition-specs":[{"spec-id":0,"fields":[{"source-id":1,"field-id":1000,"transform":"identity","name":"x"}]}],"default-spec-id":0,"last-partition-id":1000,"properties":{"read.split.target.size":"134217728"},"current-snapshot-id":3055729675574597004,"snapshots":[{"snapshot-id":3051729675574597004,"sequence-number":0,"timestamp-ms":1515100955770,"manifest-list":"s3://a/b/1.avro","summary":{"operation":"append"}},{"snapshot-id":3055729675574597004,"parent-snapshot-id":3051729675574597004,"sequence-number":1,"timestamp-ms":1555100955770,"manifest-list":"s3://a/b/2.avro","summary":{"operation":"append"},"schema-id":1}],"snapshot-log":[{"snapshot-id":3051729675574597004,"timestamp-ms":1515100955770},{"snapshot-id":3055729675574597004,"timestamp-ms":1555100955770}],"metadata-log":[{"metadata-file":"s3://bucket/.../v1.json","timestamp-ms":1515100}],"sort-orders":[{"order-id":3,"fields":[{"source-id":2,"transform":"identity","direction":"asc","null-order":"nulls-first"},{"source-id":3,"transform":"bucket[4]","direction":"desc","null-order":"nulls-last"}]}],"default-sort-order-id":3,"refs":{"test":{"snapshot-id":3051729675574597004,"type":"tag","max-ref-age-ms":10000000},"main":{"snapshot-id":3055729675574597004,"type":"branch"}},"format-version":2,"last-sequence-number":34}""" + assert table_metadata == expected + + +def test_migrate_v1_schemas(example_table_metadata_v1: Dict[str, Any]) -> None: + table_metadata = TableMetadataV1(**example_table_metadata_v1) + + assert isinstance(table_metadata, TableMetadataV1) + assert len(table_metadata.schemas) == 1 + assert table_metadata.schemas[0] == table_metadata.schema_ + + +def 
test_migrate_v1_partition_specs(example_table_metadata_v1: Dict[str, Any]) -> None: + # The single v1 partition-spec field is migrated into the v2 partition-specs list + table_metadata = TableMetadataV1(**example_table_metadata_v1) + assert isinstance(table_metadata, TableMetadataV1) + assert len(table_metadata.partition_specs) == 1 + # Spec ID gets added automatically + assert table_metadata.partition_specs == [ + PartitionSpec(PartitionField(source_id=1, field_id=1000, transform=IdentityTransform(), name="x")), + ] + + +def test_invalid_format_version(example_table_metadata_v1: Dict[str, Any]) -> None: + """Test the exception when trying to load an unknown version""" + + example_table_metadata_v22 = copy(example_table_metadata_v1) + example_table_metadata_v22["format-version"] = -1 + + with pytest.raises(ValidationError) as exc_info: + TableMetadataUtil.parse_raw(json.dumps(example_table_metadata_v22)) + + assert "Input tag '-1' found using 'format_version'" in str(exc_info.value) + + +def test_current_schema_not_found() -> None: + """Test that we raise an exception when the schema can't be found""" + + table_metadata_schema_not_found = { + "format-version": 2, + "table-uuid": "d20125c8-7284-442c-9aea-15fee620737c", + "location": "s3://bucket/test/location", + "last-updated-ms": 1602638573874, + "last-column-id": 3, + "schemas": [ + {"type": "struct", "schema-id": 0, "fields": [{"id": 1, "name": "x", "required": True, "type": "long"}]}, + { + "type": "struct", + "schema-id": 1, + "identifier-field-ids": [1, 2], + "fields": [ + {"id": 1, "name": "x", "required": True, "type": "long"}, + {"id": 2, "name": "y", "required": True, "type": "long", "doc": "comment"}, + {"id": 3, "name": "z", "required": True, "type": "long"}, + ], + }, + ], + "current-schema-id": 2, + "default-spec-id": 0, + "partition-specs": [{"spec-id": 0, "fields": [{"name": "x", "transform": "identity", "source-id": 1, "field-id": 1000}]}], + "last-partition-id": 1000, + "default-sort-order-id": 0, + "properties": {}, + "current-snapshot-id": -1, + "snapshots": [], + } + + with pytest.raises(ValidationError) as exc_info: + TableMetadataUtil.parse_raw(json.dumps(table_metadata_schema_not_found)) + + assert "current-schema-id 2 can't be found in the schemas" in str(exc_info.value) + + +def test_sort_order_not_found() -> None: + """Test that we raise an exception when the default sort order can't be found""" + + table_metadata_sort_order_not_found = { + "format-version": 2, + "table-uuid": "d20125c8-7284-442c-9aea-15fee620737c", + "location": "s3://bucket/test/location", + "last-updated-ms": 1602638573874, + "last-column-id": 3, + "schemas": [ + { + "type": "struct", + "schema-id": 0, + "identifier-field-ids": [1, 2], + "fields": [ + {"id": 1, "name": "x", "required": True, "type": "long"}, + {"id": 2, "name": "y", "required": True, "type": "long", "doc": "comment"}, + {"id": 3, "name": "z", "required": True, "type": "long"}, + ], + }, + ], + "default-sort-order-id": 4, + "sort-orders": [ + { + "order-id": 3, + "fields": [ + {"transform": "identity", "source-id": 2, "direction": "asc", "null-order": "nulls-first"}, + {"transform": "bucket[4]", "source-id": 3, "direction": "desc", "null-order": "nulls-last"}, + ], + } + ], + "current-schema-id": 0, + "default-spec-id": 0, + "partition-specs": [{"spec-id": 0, "fields": [{"name": "x", "transform": "identity", "source-id": 1, "field-id": 1000}]}], + "last-partition-id": 1000, + "properties": {}, + "current-snapshot-id": -1, + "snapshots": [], + } + + with pytest.raises(ValidationError) as exc_info: +
TableMetadataUtil.parse_raw(json.dumps(table_metadata_sort_order_not_found)) + + assert "default-sort-order-id 4 can't be found" in str(exc_info.value) + + +def test_sort_order_unsorted() -> None: + """Test that default-sort-order-id 0 (unsorted) is accepted even when sort-orders is empty""" + + table_metadata_sort_order_unsorted = { + "format-version": 2, + "table-uuid": "d20125c8-7284-442c-9aea-15fee620737c", + "location": "s3://bucket/test/location", + "last-updated-ms": 1602638573874, + "last-column-id": 3, + "schemas": [ + { + "type": "struct", + "schema-id": 0, + "identifier-field-ids": [1, 2], + "fields": [ + {"id": 1, "name": "x", "required": True, "type": "long"}, + {"id": 2, "name": "y", "required": True, "type": "long", "doc": "comment"}, + {"id": 3, "name": "z", "required": True, "type": "long"}, + ], + }, + ], + "default-sort-order-id": 0, + "sort-orders": [], + "current-schema-id": 0, + "default-spec-id": 0, + "partition-specs": [{"spec-id": 0, "fields": [{"name": "x", "transform": "identity", "source-id": 1, "field-id": 1000}]}], + "last-partition-id": 1000, + "properties": {}, + "current-snapshot-id": -1, + "snapshots": [], + } + + table_metadata = TableMetadataUtil.parse_raw(json.dumps(table_metadata_sort_order_unsorted)) + + # Most important here is that we correctly handle sort-order-id 0 + assert len(table_metadata.sort_orders) == 0 + + +def test_invalid_partition_spec() -> None: + table_metadata_spec_not_found = { + "format-version": 2, + "table-uuid": "9c12d441-03fe-4693-9a96-a0705ddf69c1", + "location": "s3://bucket/test/location", + "last-sequence-number": 34, + "last-updated-ms": 1602638573590, + "last-column-id": 3, + "current-schema-id": 1, + "schemas": [ + {"type": "struct", "schema-id": 0, "fields": [{"id": 1, "name": "x", "required": True, "type": "long"}]}, + { + "type": "struct", + "schema-id": 1, + "identifier-field-ids": [1, 2], + "fields": [ + {"id": 1, "name": "x", "required": True, "type": "long"}, + {"id": 2, "name": "y", "required": True, "type": "long", "doc": "comment"}, + {"id": 3, "name": "z", "required": True, "type": "long"}, + ], + }, + ], + "sort-orders": [], + "default-sort-order-id": 0, + "default-spec-id": 1, + "partition-specs": [{"spec-id": 0, "fields": [{"name": "x", "transform": "identity", "source-id": 1, "field-id": 1000}]}], + "last-partition-id": 1000, + } + with pytest.raises(ValidationError) as exc_info: + TableMetadataUtil.parse_raw(json.dumps(table_metadata_spec_not_found)) + + assert "default-spec-id 1 can't be found" in str(exc_info.value) + + +def test_v1_writing_metadata(example_table_metadata_v1: Dict[str, Any]) -> None: + """ + https://iceberg.apache.org/spec/#version-2 + + Writing v1 metadata: + - Table metadata field last-sequence-number should not be written + """ + + table_metadata = TableMetadataV1(**example_table_metadata_v1) + metadata_v1_json = table_metadata.model_dump_json() + metadata_v1 = json.loads(metadata_v1_json) + + assert "last-sequence-number" not in metadata_v1 + + +def test_v1_metadata_for_v2(example_table_metadata_v1: Dict[str, Any]) -> None: + """ + https://iceberg.apache.org/spec/#version-2 + + Reading v1 metadata for v2: + - Table metadata field last-sequence-number must default to 0 + """ + + table_metadata = TableMetadataV1(**example_table_metadata_v1).to_v2() + + assert table_metadata.last_sequence_number == 0 + + +def test_v1_write_metadata_for_v2() -> None: + """ + https://iceberg.apache.org/spec/#version-2 + + Table metadata JSON: + - last-sequence-number was added and is required; default to 0 when reading v1 metadata + - 
table-uuid is now required + - current-schema-id is now required + - schemas is now required + - partition-specs is now required + - default-spec-id is now required + - last-partition-id is now required + - sort-orders is now required + - default-sort-order-id is now required + - schema is no longer required and should be omitted; use schemas and current-schema-id instead + - partition-spec is no longer required and should be omitted; use partition-specs and default-spec-id instead + """ + + minimal_example_v1 = { + "format-version": 1, + "location": "s3://bucket/test/location", + "last-updated-ms": 1602638573874, + "last-column-id": 3, + "schema": { + "type": "struct", + "fields": [ + {"id": 1, "name": "x", "required": True, "type": "long"}, + {"id": 2, "name": "y", "required": True, "type": "long", "doc": "comment"}, + {"id": 3, "name": "z", "required": True, "type": "long"}, + ], + }, + "partition-spec": [{"name": "x", "transform": "identity", "source-id": 1, "field-id": 1000}], + "properties": {}, + "current-snapshot-id": -1, + "snapshots": [{"snapshot-id": 1925, "timestamp-ms": 1602638573822}], + } + + table_metadata = TableMetadataV1(**minimal_example_v1).to_v2() + metadata_v2_json = table_metadata.model_dump_json() + metadata_v2 = json.loads(metadata_v2_json) + + assert metadata_v2["last-sequence-number"] == 0 + assert UUID(metadata_v2["table-uuid"]) is not None + assert metadata_v2["current-schema-id"] == 0 + assert metadata_v2["schemas"] == [ + { + "fields": [ + {"id": 1, "name": "x", "required": True, "type": "long"}, + {"doc": "comment", "id": 2, "name": "y", "required": True, "type": "long"}, + {"id": 3, "name": "z", "required": True, "type": "long"}, + ], + "identifier-field-ids": [], + "schema-id": 0, + "type": "struct", + } + ] + assert metadata_v2["partition-specs"] == [ + { + "spec-id": 0, + "fields": [{"name": "x", "transform": "identity", "source-id": 1, "field-id": 1000}], + } + ] + assert metadata_v2["default-spec-id"] == 0 + assert metadata_v2["last-partition-id"] == 1000 + assert metadata_v2["sort-orders"] == [{"order-id": 0, "fields": []}] + assert metadata_v2["default-sort-order-id"] == 0 + # Deprecated fields + assert "schema" not in metadata_v2 + assert "partition-spec" not in metadata_v2 + + +def test_v2_ref_creation(example_table_metadata_v2: Dict[str, Any]) -> None: + table_metadata = TableMetadataV2(**example_table_metadata_v2) + assert table_metadata.refs == { + "main": SnapshotRef( + snapshot_id=3055729675574597004, + snapshot_ref_type=SnapshotRefType.BRANCH, + min_snapshots_to_keep=None, + max_snapshot_age_ms=None, + max_ref_age_ms=None, + ), + "test": SnapshotRef( + snapshot_id=3051729675574597004, + snapshot_ref_type=SnapshotRefType.TAG, + min_snapshots_to_keep=None, + max_snapshot_age_ms=None, + max_ref_age_ms=10000000, + ), + } + + +def test_metadata_v1() -> None: + valid_v1 = { + "format-version": 1, + "table-uuid": "bf289591-dcc0-4234-ad4f-5c3eed811a29", + "location": "s3://tabular-wh-us-west-2-dev/8bcb0838-50fc-472d-9ddb-8feb89ef5f1e/bf289591-dcc0-4234-ad4f-5c3eed811a29", + "last-updated-ms": 1657810967051, + "last-column-id": 3, + "schema": { + "type": "struct", + "schema-id": 0, + "identifier-field-ids": [2], + "fields": [ + {"id": 1, "name": "foo", "required": False, "type": "string"}, + {"id": 2, "name": "bar", "required": True, "type": "int"}, + {"id": 3, "name": "baz", "required": False, "type": "boolean"}, + ], + }, + "current-schema-id": 0, + "schemas": [ + { + "type": "struct", + "schema-id": 0, + "identifier-field-ids": [2], + "fields": [ 
+ {"id": 1, "name": "foo", "required": False, "type": "string"}, + {"id": 2, "name": "bar", "required": True, "type": "int"}, + {"id": 3, "name": "baz", "required": False, "type": "boolean"}, + ], + } + ], + "partition-spec": [], + "default-spec-id": 0, + "partition-specs": [{"spec-id": 0, "fields": []}], + "last-partition-id": 999, + "default-sort-order-id": 0, + "sort-orders": [{"order-id": 0, "fields": []}], + "properties": { + "write.delete.parquet.compression-codec": "zstd", + "write.metadata.compression-codec": "gzip", + "write.summary.partition-limit": "100", + "write.parquet.compression-codec": "zstd", + }, + "current-snapshot-id": -1, + "refs": {}, + "snapshots": [], + "snapshot-log": [], + "metadata-log": [], + } + TableMetadataV1(**valid_v1) + + +@patch("time.time", MagicMock(return_value=12345)) +def test_make_metadata_fresh() -> None: + schema = Schema( + NestedField(field_id=10, name="foo", field_type=StringType(), required=False), + NestedField(field_id=22, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=33, name="baz", field_type=BooleanType(), required=False), + NestedField( + field_id=41, + name="qux", + field_type=ListType(element_id=56, element_type=StringType(), element_required=True), + required=True, + ), + NestedField( + field_id=6, + name="quux", + field_type=MapType( + key_id=77, + key_type=StringType(), + value_id=88, + value_type=MapType(key_id=91, key_type=StringType(), value_id=102, value_type=IntegerType(), value_required=True), + value_required=True, + ), + required=True, + ), + NestedField( + field_id=113, + name="location", + field_type=ListType( + element_id=124, + element_type=StructType( + NestedField(field_id=132, name="latitude", field_type=FloatType(), required=False), + NestedField(field_id=143, name="longitude", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + NestedField( + field_id=155, + name="person", + field_type=StructType( + NestedField(field_id=169, name="name", field_type=StringType(), required=False), + NestedField(field_id=178, name="age", field_type=IntegerType(), required=True), + ), + required=False, + ), + schema_id=10, + identifier_field_ids=[22], + ) + + partition_spec = PartitionSpec( + PartitionField(source_id=22, field_id=1022, transform=IdentityTransform(), name="bar"), spec_id=10 + ) + + sort_order = SortOrder( + SortField(source_id=10, transform=IdentityTransform(), direction=SortDirection.ASC, null_order=NullOrder.NULLS_LAST), + order_id=10, + ) + + actual = new_table_metadata( + schema=schema, partition_spec=partition_spec, sort_order=sort_order, location="s3://", properties={} + ) + + expected = TableMetadataV2( + location="s3://", + table_uuid=actual.table_uuid, + last_updated_ms=actual.last_updated_ms, + last_column_id=17, + schemas=[ + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + NestedField( + field_id=4, + name="qux", + field_type=ListType(type="list", element_id=8, element_type=StringType(), element_required=True), + required=True, + ), + NestedField( + field_id=5, + name="quux", + field_type=MapType( + type="map", + key_id=9, + key_type=StringType(), + value_id=10, + value_type=MapType( + type="map", + key_id=11, + key_type=StringType(), + value_id=12, + value_type=IntegerType(), + value_required=True, + ), + value_required=True, + ), 
+ required=True, + ), + NestedField( + field_id=6, + name="location", + field_type=ListType( + type="list", + element_id=13, + element_type=StructType( + NestedField(field_id=14, name="latitude", field_type=FloatType(), required=False), + NestedField(field_id=15, name="longitude", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + NestedField( + field_id=7, + name="person", + field_type=StructType( + NestedField(field_id=16, name="name", field_type=StringType(), required=False), + NestedField(field_id=17, name="age", field_type=IntegerType(), required=True), + ), + required=False, + ), + schema_id=0, + identifier_field_ids=[2], + ) + ], + current_schema_id=0, + partition_specs=[PartitionSpec(PartitionField(source_id=2, field_id=1000, transform=IdentityTransform(), name="bar"))], + default_spec_id=0, + last_partition_id=1000, + properties={}, + current_snapshot_id=None, + snapshots=[], + snapshot_log=[], + metadata_log=[], + sort_orders=[ + SortOrder( + SortField( + source_id=1, transform=IdentityTransform(), direction=SortDirection.ASC, null_order=NullOrder.NULLS_LAST + ), + order_id=1, + ) + ], + default_sort_order_id=1, + refs={}, + format_version=2, + last_sequence_number=0, + ) + + assert actual.model_dump() == expected.model_dump() diff --git a/tests/table/test_partitioning.py b/tests/table/test_partitioning.py new file mode 100644 index 0000000000..cb60c9a8e5 --- /dev/null +++ b/tests/table/test_partitioning.py @@ -0,0 +1,131 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
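+# Tests for PartitionField and PartitionSpec construction, compatibility, and (de)serialization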
+from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionField, PartitionSpec +from pyiceberg.schema import Schema +from pyiceberg.transforms import BucketTransform, TruncateTransform +from pyiceberg.types import ( + IntegerType, + NestedField, + StringType, + StructType, +) + + +def test_partition_field_init() -> None: + bucket_transform = BucketTransform(100) # type: ignore + partition_field = PartitionField(3, 1000, bucket_transform, "id") + + assert partition_field.source_id == 3 + assert partition_field.field_id == 1000 + assert partition_field.transform == bucket_transform + assert partition_field.name == "id" + assert partition_field == partition_field + assert str(partition_field) == "1000: id: bucket[100](3)" + assert ( + repr(partition_field) + == "PartitionField(source_id=3, field_id=1000, transform=BucketTransform(num_buckets=100), name='id')" + ) + + +def test_unpartitioned_partition_spec_repr() -> None: + assert repr(PartitionSpec()) == "PartitionSpec(spec_id=0)" + + +def test_partition_spec_init() -> None: + bucket_transform: BucketTransform = BucketTransform(4) # type: ignore + + id_field1 = PartitionField(3, 1001, bucket_transform, "id") + partition_spec1 = PartitionSpec(id_field1) + + assert partition_spec1.spec_id == 0 + assert partition_spec1 == partition_spec1 + assert partition_spec1 != id_field1 + assert str(partition_spec1) == f"[\n {str(id_field1)}\n]" + assert not partition_spec1.is_unpartitioned() + # only differ by PartitionField field_id + id_field2 = PartitionField(3, 1002, bucket_transform, "id") + partition_spec2 = PartitionSpec(id_field2) + assert partition_spec1 != partition_spec2 + assert partition_spec1.compatible_with(partition_spec2) + assert partition_spec1.fields_by_source_id(3) == [id_field1] + # Does not exist + assert partition_spec1.fields_by_source_id(1925) == [] + + +def test_partition_compatible_with() -> None: + bucket_transform: BucketTransform = BucketTransform(4) # type: ignore + field1 = PartitionField(3, 100, bucket_transform, "id") + field2 = PartitionField(3, 102, bucket_transform, "id") + lhs = PartitionSpec( + field1, + ) + rhs = PartitionSpec(field1, field2) + assert not lhs.compatible_with(rhs) + + +def test_unpartitioned() -> None: + assert len(UNPARTITIONED_PARTITION_SPEC.fields) == 0 + assert UNPARTITIONED_PARTITION_SPEC.is_unpartitioned() + assert str(UNPARTITIONED_PARTITION_SPEC) == "[]" + + +def test_serialize_unpartitioned_spec() -> None: + assert UNPARTITIONED_PARTITION_SPEC.model_dump_json() == """{"spec-id":0,"fields":[]}""" + + +def test_serialize_partition_spec() -> None: + partitioned = PartitionSpec( + PartitionField(source_id=1, field_id=1000, transform=TruncateTransform(width=19), name="str_truncate"), + PartitionField(source_id=2, field_id=1001, transform=BucketTransform(num_buckets=25), name="int_bucket"), + spec_id=3, + ) + assert ( + partitioned.model_dump_json() + == """{"spec-id":3,"fields":[{"source-id":1,"field-id":1000,"transform":"truncate[19]","name":"str_truncate"},{"source-id":2,"field-id":1001,"transform":"bucket[25]","name":"int_bucket"}]}""" + ) + + +def test_deserialize_unpartition_spec() -> None: + json_partition_spec = """{"spec-id":0,"fields":[]}""" + spec = PartitionSpec.model_validate_json(json_partition_spec) + + assert spec == PartitionSpec(spec_id=0) + + +def test_deserialize_partition_spec() -> None: + json_partition_spec = """{"spec-id": 3, "fields": [{"source-id": 1, "field-id": 1000, "transform": "truncate[19]", "name": "str_truncate"}, {"source-id": 2, "field-id": 
1001, "transform": "bucket[25]", "name": "int_bucket"}]}""" + + spec = PartitionSpec.model_validate_json(json_partition_spec) + + assert spec == PartitionSpec( + PartitionField(source_id=1, field_id=1000, transform=TruncateTransform(width=19), name="str_truncate"), + PartitionField(source_id=2, field_id=1001, transform=BucketTransform(num_buckets=25), name="int_bucket"), + spec_id=3, + ) + + +def test_partition_type(table_schema_simple: Schema) -> None: + spec = PartitionSpec( + PartitionField(source_id=1, field_id=1000, transform=TruncateTransform(width=19), name="str_truncate"), + PartitionField(source_id=2, field_id=1001, transform=BucketTransform(num_buckets=25), name="int_bucket"), + spec_id=3, + ) + + assert spec.partition_type(table_schema_simple) == StructType( + NestedField(field_id=1000, name="str_truncate", field_type=StringType(), required=False), + NestedField(field_id=1001, name="int_bucket", field_type=IntegerType(), required=False), + ) diff --git a/tests/table/test_refs.py b/tests/table/test_refs.py new file mode 100644 index 0000000000..d106f0237a --- /dev/null +++ b/tests/table/test_refs.py @@ -0,0 +1,34 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint:disable=eval-used +from pyiceberg.table.refs import SnapshotRef, SnapshotRefType + + +def test_snapshot_with_properties_repr() -> None: + snapshot_ref = SnapshotRef( + snapshot_id=3051729675574597004, + snapshot_ref_type=SnapshotRefType.TAG, + min_snapshots_to_keep=None, + max_snapshot_age_ms=None, + max_ref_age_ms=10000000, + ) + + assert ( + repr(snapshot_ref) + == """SnapshotRef(snapshot_id=3051729675574597004, snapshot_ref_type=SnapshotRefType.TAG, min_snapshots_to_keep=None, max_snapshot_age_ms=None, max_ref_age_ms=10000000)""" + ) + assert snapshot_ref == eval(repr(snapshot_ref)) diff --git a/tests/table/test_snapshots.py b/tests/table/test_snapshots.py new file mode 100644 index 0000000000..625cbc1b6c --- /dev/null +++ b/tests/table/test_snapshots.py @@ -0,0 +1,122 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# pylint:disable=redefined-outer-name,eval-used +import pytest + +from pyiceberg.table.snapshots import Operation, Snapshot, Summary + + +@pytest.fixture +def snapshot() -> Snapshot: + return Snapshot( + snapshot_id=25, + parent_snapshot_id=19, + sequence_number=200, + timestamp_ms=1602638573590, + manifest_list="s3:/a/b/c.avro", + summary=Summary(Operation.APPEND), + schema_id=3, + ) + + +@pytest.fixture +def snapshot_with_properties() -> Snapshot: + return Snapshot( + snapshot_id=25, + parent_snapshot_id=19, + sequence_number=200, + timestamp_ms=1602638573590, + manifest_list="s3:/a/b/c.avro", + summary=Summary(Operation.APPEND, foo="bar"), + schema_id=3, + ) + + +def test_serialize_summary() -> None: + assert Summary(Operation.APPEND).model_dump_json() == """{"operation":"append"}""" + + +def test_serialize_summary_with_properties() -> None: + summary = Summary(Operation.APPEND, property="yes") + assert summary.model_dump_json() == """{"operation":"append","property":"yes"}""" + + +def test_serialize_snapshot(snapshot: Snapshot) -> None: + assert ( + snapshot.model_dump_json() + == """{"snapshot-id":25,"parent-snapshot-id":19,"sequence-number":200,"timestamp-ms":1602638573590,"manifest-list":"s3:/a/b/c.avro","summary":{"operation":"append"},"schema-id":3}""" + ) + + +def test_serialize_snapshot_without_sequence_number() -> None: + snapshot = Snapshot( + snapshot_id=25, + parent_snapshot_id=19, + timestamp_ms=1602638573590, + manifest_list="s3:/a/b/c.avro", + summary=Summary(Operation.APPEND), + schema_id=3, + ) + actual = snapshot.model_dump_json() + expected = """{"snapshot-id":25,"parent-snapshot-id":19,"timestamp-ms":1602638573590,"manifest-list":"s3:/a/b/c.avro","summary":{"operation":"append"},"schema-id":3}""" + assert actual == expected + + +def test_serialize_snapshot_with_properties(snapshot_with_properties: Snapshot) -> None: + assert ( + snapshot_with_properties.model_dump_json() + == """{"snapshot-id":25,"parent-snapshot-id":19,"sequence-number":200,"timestamp-ms":1602638573590,"manifest-list":"s3:/a/b/c.avro","summary":{"operation":"append","foo":"bar"},"schema-id":3}""" + ) + + +def test_deserialize_summary() -> None: + summary = Summary.model_validate_json("""{"operation": "append"}""") + assert summary.operation == Operation.APPEND + + +def test_deserialize_summary_with_properties() -> None: + summary = Summary.model_validate_json("""{"operation": "append", "property": "yes"}""") + assert summary.operation == Operation.APPEND + assert summary.additional_properties == {"property": "yes"} + + +def test_deserialize_snapshot(snapshot: Snapshot) -> None: + payload = """{"snapshot-id": 25, "parent-snapshot-id": 19, "sequence-number": 200, "timestamp-ms": 1602638573590, "manifest-list": "s3:/a/b/c.avro", "summary": {"operation": "append"}, "schema-id": 3}""" + actual = Snapshot.model_validate_json(payload) + assert actual == snapshot + + +def test_deserialize_snapshot_with_properties(snapshot_with_properties: Snapshot) -> None: + payload = """{"snapshot-id":25,"parent-snapshot-id":19,"sequence-number":200,"timestamp-ms":1602638573590,"manifest-list":"s3:/a/b/c.avro","summary":{"operation":"append","foo":"bar"},"schema-id":3}""" + snapshot = Snapshot.model_validate_json(payload) + assert snapshot == snapshot_with_properties + + +def test_snapshot_repr(snapshot: Snapshot) -> None: + assert ( + repr(snapshot) + == """Snapshot(snapshot_id=25, parent_snapshot_id=19, sequence_number=200, timestamp_ms=1602638573590, manifest_list='s3:/a/b/c.avro', summary=Summary(Operation.APPEND), 
schema_id=3)""" + ) + assert snapshot == eval(repr(snapshot)) + + +def test_snapshot_with_properties_repr(snapshot_with_properties: Snapshot) -> None: + assert ( + repr(snapshot_with_properties) + == """Snapshot(snapshot_id=25, parent_snapshot_id=19, sequence_number=200, timestamp_ms=1602638573590, manifest_list='s3:/a/b/c.avro', summary=Summary(Operation.APPEND, **{'foo': 'bar'}), schema_id=3)""" + ) + assert snapshot_with_properties == eval(repr(snapshot_with_properties)) diff --git a/tests/table/test_sorting.py b/tests/table/test_sorting.py new file mode 100644 index 0000000000..6b41193631 --- /dev/null +++ b/tests/table/test_sorting.py @@ -0,0 +1,97 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint:disable=redefined-outer-name,eval-used +import json +from typing import Any, Dict + +import pytest + +from pyiceberg.table.metadata import TableMetadataUtil +from pyiceberg.table.sorting import ( + UNSORTED_SORT_ORDER, + NullOrder, + SortDirection, + SortField, + SortOrder, +) +from pyiceberg.transforms import BucketTransform, IdentityTransform, VoidTransform + + +@pytest.fixture +def sort_order() -> SortOrder: + return SortOrder( + SortField(source_id=19, transform=IdentityTransform(), null_order=NullOrder.NULLS_FIRST), + SortField(source_id=25, transform=BucketTransform(4), direction=SortDirection.DESC), + SortField(source_id=22, transform=VoidTransform(), direction=SortDirection.ASC), + order_id=22, + ) + + +def test_serialize_sort_order_unsorted() -> None: + assert UNSORTED_SORT_ORDER.model_dump_json() == '{"order-id":0,"fields":[]}' + + +def test_serialize_sort_order(sort_order: SortOrder) -> None: + expected = '{"order-id":22,"fields":[{"source-id":19,"transform":"identity","direction":"asc","null-order":"nulls-first"},{"source-id":25,"transform":"bucket[4]","direction":"desc","null-order":"nulls-last"},{"source-id":22,"transform":"void","direction":"asc","null-order":"nulls-first"}]}' + assert sort_order.model_dump_json() == expected + + +def test_deserialize_sort_order(sort_order: SortOrder) -> None: + payload = '{"order-id": 22, "fields": [{"source-id": 19, "transform": "identity", "direction": "asc", "null-order": "nulls-first"}, {"source-id": 25, "transform": "bucket[4]", "direction": "desc", "null-order": "nulls-last"}, {"source-id": 22, "transform": "void", "direction": "asc", "null-order": "nulls-first"}]}' + + assert SortOrder.model_validate_json(payload) == sort_order + + +def test_sorting_schema(example_table_metadata_v2: Dict[str, Any]) -> None: + table_metadata = TableMetadataUtil.parse_raw(json.dumps(example_table_metadata_v2)) + + assert table_metadata.sort_orders == [ + SortOrder( + SortField(2, IdentityTransform(), SortDirection.ASC, null_order=NullOrder.NULLS_FIRST), + SortField( + 3, + BucketTransform(4), + 
direction=SortDirection.DESC, + null_order=NullOrder.NULLS_LAST, + ), + order_id=3, + ) + ] + + +def test_sorting_to_string(sort_order: SortOrder) -> None: + expected = """[ + 19 ASC NULLS FIRST + bucket[4](25) DESC NULLS LAST + void(22) ASC NULLS FIRST +]""" + assert str(sort_order) == expected + + +def test_sorting_to_repr(sort_order: SortOrder) -> None: + expected = """SortOrder(SortField(source_id=19, transform=IdentityTransform(), direction=SortDirection.ASC, null_order=NullOrder.NULLS_FIRST), SortField(source_id=25, transform=BucketTransform(num_buckets=4), direction=SortDirection.DESC, null_order=NullOrder.NULLS_LAST), SortField(source_id=22, transform=VoidTransform(), direction=SortDirection.ASC, null_order=NullOrder.NULLS_FIRST), order_id=22)""" + assert repr(sort_order) == expected + + +def test_unsorting_to_repr() -> None: + expected = """SortOrder(order_id=0)""" + assert repr(UNSORTED_SORT_ORDER) == expected + + +def test_sorting_repr(sort_order: SortOrder) -> None: + """Make sure that the repr converts back to the original object""" + assert sort_order == eval(repr(sort_order)) diff --git a/tests/test_conversions.py b/tests/test_conversions.py new file mode 100644 index 0000000000..3b3e519579 --- /dev/null +++ b/tests/test_conversions.py @@ -0,0 +1,546 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""This test module tests PrimitiveType-based conversions of values to/from bytes + +Notes: + Boolean: + - Stored as 0x00 for False and non-zero byte for True + Integer: + - Stored as 4 bytes in little-endian order + - 84202 is 0...01|01001000|11101010 in binary + Long: + - Stored as 8 bytes in little-endian order + - 200L is 0...0|11001000 in binary + - 11001000 -> 200 (-56), 00000000 -> 0, ... , 00000000 -> 0 + Double: + - Stored as 8 bytes in little-endian order + - floating point numbers are represented as sign * 2ˆexponent * mantissa + - 6.0 is 1 * 2ˆ2 * 1.5 and encoded as 01000000|00011000|0...0 + - 00000000 -> 0, ... , 00011000 -> 24, 01000000 -> 64 + Date: + - Stored as days from 1970-01-01 in a 4-byte little-endian int + - 1000 is 0...0|00000011|11101000 in binary + - 11101000 -> 232 (-24), 00000011 -> 3, ... , 00000000 -> 0 + Time: + - Stored as microseconds from midnight in an 8-byte little-endian long + - 10000L is 0...0|00100111|00010000 in binary + - 00010000 -> 16, 00100111 -> 39, ... , 00000000 -> 0 + Timestamp: + - Stored as microseconds from 1970-01-01 00:00:00.000000 in an 8-byte little-endian long + - 400000L is 0...110|00011010|10000000 in binary + - 10000000 -> 128 (-128), 00011010 -> 26, 00000110 -> 6, ...
, 00000000 -> 0 + String: + - Stored as UTF-8 bytes (without length) + - 'A' -> 65, 'B' -> 66, 'C' -> 67 + UUID: + - Stored as 16-byte big-endian values + - f79c3e09-677c-4bbd-a479-3f349cb785e7 is encoded as F7 9C 3E 09 67 7C 4B BD A4 79 3F 34 9C B7 85 E7 + - 0xF7 -> 11110111 -> 247 (-9), 0x9C -> 10011100 -> 156 (-100), 0x3E -> 00111110 -> 62, + - 0x09 -> 00001001 -> 9, 0x67 -> 01100111 -> 103, 0x7C -> 01111100 -> 124, + - 0x4B -> 01001011 -> 75, 0xBD -> 10111101 -> 189 (-67), 0xA4 -> 10100100 -> 164 (-92), + - 0x79 -> 01111001 -> 121, 0x3F -> 00111111 -> 63, 0x34 -> 00110100 -> 52, + - 0x9C -> 10011100 -> 156 (-100), 0xB7 -> 10110111 -> 183 (-73), 0x85 -> 10000101 -> 133 (-123), + - 0xE7 -> 11100111 -> 231 (-25) + Fixed: + - Stored directly + - 'a' -> 97, 'b' -> 98 + Binary: + - Stored directly + - 'Z' -> 90 + Decimal: + - Stored as unscaled values in the form of two's-complement big-endian binary using the minimum number of bytes for the values + - 345 is 0...1|01011001 in binary + - 00000001 -> 1, 01011001 -> 89 + Float: + - Stored as 4 bytes in little-endian order + - floating point numbers are represented as sign * 2ˆexponent * mantissa + - -4.5F is -1 * 2ˆ2 * 1.125 and encoded as 11000000|10010000|0...0 in binary + - 00000000 -> 0, 00000000 -> 0, 10010000 -> 144 (-112), 11000000 -> 192 (-64), +""" +import struct +import uuid +from datetime import ( + date, + datetime, + time, + timezone, +) +from decimal import Decimal +from typing import Any, Union + +import pytest + +import pyiceberg.utils.decimal as decimal_util +from pyiceberg import conversions +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IntegerType, + LongType, + PrimitiveType, + StringType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) + + +@pytest.mark.parametrize( + "value, expected_result", + [ + (Decimal("1.2345"), 12345), + (Decimal("12.345"), 12345), + (Decimal("1234.5"), 12345), + (Decimal("9999999.9999"), 99999999999), + (Decimal("1.0"), 10), + (Decimal("1"), 1), + (Decimal("0.1"), 1), + (Decimal("0.12345"), 12345), + (Decimal("0.0000001"), 1), + ], +) +def test_decimal_to_unscaled(value: Decimal, expected_result: int) -> None: + """Test converting a decimal to an unscaled value""" + assert decimal_util.decimal_to_unscaled(value=value) == expected_result + + +@pytest.mark.parametrize( + "unscaled, scale, expected_result", + [ + (12345, 4, Decimal("1.2345")), + (12345, 3, Decimal("12.345")), + (12345, 1, Decimal("1234.5")), + (99999999999, 4, Decimal("9999999.9999")), + (1, 1, Decimal("0.1")), + (1, 0, Decimal("1")), + (12345, 5, Decimal("0.12345")), + (1, 7, Decimal("0.0000001")), + ], +) +def test_unscaled_to_decimal(unscaled: int, scale: int, expected_result: Decimal) -> None: + """Test converting an unscaled value to a decimal with a specified scale""" + assert decimal_util.unscaled_to_decimal(unscaled=unscaled, scale=scale) == expected_result + + +@pytest.mark.parametrize( + "primitive_type, value_str, expected_result", + [ + (BooleanType(), "true", True), + (BooleanType(), "false", False), + (BooleanType(), "TRUE", True), + (BooleanType(), "FALSE", False), + (IntegerType(), "1", 1), + (IntegerType(), "9999", 9999), + (LongType(), "123456789", 123456789), + (FloatType(), "1.1", 1.1), + (DoubleType(), "99999.9", 99999.9), + (DecimalType(5, 2), "123.45", Decimal("123.45")), + (StringType(), "foo", "foo"), + (UUIDType(), "f79c3e09-677c-4bbd-a479-3f349cb785e7", uuid.UUID("f79c3e09-677c-4bbd-a479-3f349cb785e7")), + 
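+        # for fixed and binary types the partition string is encoded to raw bytes + # (UTF-8), which is why "foo" in the cases below is expected back as b"foo"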
(FixedType(3), "foo", b"foo"), + (BinaryType(), "foo", b"foo"), + ], +) +def test_partition_to_py(primitive_type: PrimitiveType, value_str: str, expected_result: Any) -> None: + """Test converting a partition value to a python built-in""" + assert conversions.partition_to_py(primitive_type, value_str) == expected_result + + +@pytest.mark.parametrize( + "primitive_type", + [ + (BinaryType()), + (BooleanType()), + (DateType()), + (DecimalType(2, 1)), + (DoubleType()), + (FixedType(1)), + (FloatType()), + (IntegerType()), + (LongType()), + (StringType()), + (TimestampType()), + (TimestamptzType()), + (TimeType()), + (UUIDType()), + ], +) +def test_none_partition_values(primitive_type: PrimitiveType) -> None: + """Test converting a partition value to a python built-in""" + assert conversions.partition_to_py(primitive_type, None) is None # type: ignore + + +@pytest.mark.parametrize( + "primitive_type", + [ + (BinaryType()), + (BooleanType()), + (DateType()), + (DecimalType(2, 1)), + (DoubleType()), + (FixedType(1)), + (FloatType()), + (IntegerType()), + (LongType()), + (StringType()), + (TimestampType()), + (TimestamptzType()), + (TimeType()), + (UUIDType()), + ], +) +def test_hive_default_partition_values(primitive_type: PrimitiveType) -> None: + """Test converting a partition value to a python built-in""" + assert conversions.partition_to_py(primitive_type, "__HIVE_DEFAULT_PARTITION__") is None + + +@pytest.mark.parametrize( + "primitive_type, value, should_raise", + [ + (IntegerType(), "123.45", True), + (IntegerType(), "1234567.89", True), + (IntegerType(), "123.00", True), + (IntegerType(), "1234567.00", True), + (LongType(), "123.45", True), + (LongType(), "1234567.89", True), + (LongType(), "123.00", True), + (LongType(), "1234567.00", True), + (IntegerType(), "12345", False), + (IntegerType(), "123456789", False), + (IntegerType(), "12300", False), + (IntegerType(), "123456700", False), + (LongType(), "12345", False), + (LongType(), "123456789", False), + (LongType(), "12300", False), + (LongType(), "123456700", False), + ], +) +def test_partition_to_py_raise_on_incorrect_precision_or_scale( + primitive_type: PrimitiveType, value: str, should_raise: bool +) -> None: + if should_raise: + with pytest.raises(ValueError) as exc_info: + conversions.partition_to_py(primitive_type, value) + + assert f"Cannot convert partition value, value cannot have fractional digits for {primitive_type} partition" in str( + exc_info.value + ) + else: + conversions.partition_to_py(primitive_type, value) + + +@pytest.mark.parametrize( + "primitive_type, b, result", + [ + (BooleanType(), b"\x00", False), + (BooleanType(), b"\x01", True), + (IntegerType(), b"\xd2\x04\x00\x00", 1234), + (LongType(), b"\xd2\x04\x00\x00\x00\x00\x00\x00", 1234), + (DoubleType(), b"\x8d\x97\x6e\x12\x83\xc0\xf3\x3f", 1.2345), + (DateType(), b"\xe8\x03\x00\x00", 1000), + (DateType(), b"\xd2\x04\x00\x00", 1234), + (TimeType(), b"\x10'\x00\x00\x00\x00\x00\x00", 10000), + (TimeType(), b"\x00\xe8vH\x17\x00\x00\x00", 100000000000), + (TimestamptzType(), b"\x80\x1a\x06\x00\x00\x00\x00\x00", 400000), + (TimestamptzType(), b"\x00\xe8vH\x17\x00\x00\x00", 100000000000), + (TimestampType(), b"\x80\x1a\x06\x00\x00\x00\x00\x00", 400000), + (TimestampType(), b"\x00\xe8vH\x17\x00\x00\x00", 100000000000), + (StringType(), b"ABC", "ABC"), + (StringType(), b"foo", "foo"), + ( + UUIDType(), + b"\xf7\x9c>\tg|K\xbd\xa4y?4\x9c\xb7\x85\xe7", + b"\xf7\x9c>\tg|K\xbd\xa4y?4\x9c\xb7\x85\xe7", + ), + (UUIDType(), b"\xf7\x9c>\tg|K\xbd\xa4y?4\x9c\xb7\x85\xe7", 
b"\xf7\x9c>\tg|K\xbd\xa4y?4\x9c\xb7\x85\xe7"), + (FixedType(3), b"foo", b"foo"), + (BinaryType(), b"foo", b"foo"), + (DecimalType(5, 2), b"\x30\x39", Decimal("123.45")), + (DecimalType(7, 4), b"\x12\xd6\x87", Decimal("123.4567")), + (DecimalType(7, 4), b"\xff\xed\x29\x79", Decimal("-123.4567")), + ], +) +def test_from_bytes(primitive_type: PrimitiveType, b: bytes, result: Any) -> None: + """Test converting from bytes""" + assert conversions.from_bytes(primitive_type, b) == result + + +@pytest.mark.parametrize( + "primitive_type, b, result", + [ + (BooleanType(), b"\x00", False), + (BooleanType(), b"\x01", True), + (IntegerType(), b"\xeaH\x01\x00", 84202), + (IntegerType(), b"\xd2\x04\x00\x00", 1234), + (LongType(), b"\xc8\x00\x00\x00\x00\x00\x00\x00", 200), + (LongType(), b"\xd2\x04\x00\x00\x00\x00\x00\x00", 1234), + (DoubleType(), b"\x00\x00\x00\x00\x00\x00\x18@", 6.0), + (DoubleType(), b"\x8d\x97n\x12\x83\xc0\xf3?", 1.2345), + (DateType(), b"\xe8\x03\x00\x00", 1000), + (DateType(), b"\xd2\x04\x00\x00", 1234), + (TimeType(), b"\x10'\x00\x00\x00\x00\x00\x00", 10000), + (TimeType(), b"\x00\xe8vH\x17\x00\x00\x00", 100000000000), + (TimestamptzType(), b"\x80\x1a\x06\x00\x00\x00\x00\x00", 400000), + (TimestamptzType(), b"\x00\xe8vH\x17\x00\x00\x00", 100000000000), + (TimestampType(), b"\x00\xe8vH\x17\x00\x00\x00", 100000000000), + (StringType(), b"ABC", "ABC"), + (StringType(), b"foo", "foo"), + ( + UUIDType(), + b"\xf7\x9c>\tg|K\xbd\xa4y?4\x9c\xb7\x85\xe7", + b"\xf7\x9c>\tg|K\xbd\xa4y?4\x9c\xb7\x85\xe7", + ), + (UUIDType(), b"\xf7\x9c>\tg|K\xbd\xa4y?4\x9c\xb7\x85\xe7", b"\xf7\x9c>\tg|K\xbd\xa4y?4\x9c\xb7\x85\xe7"), + (FixedType(3), b"foo", b"foo"), + (BinaryType(), b"foo", b"foo"), + (DecimalType(5, 2), b"\x30\x39", Decimal("123.45")), + (DecimalType(3, 2), b"\x01Y", Decimal("3.45")), + # decimal on 3-bytes to test that we use the minimum number of bytes and not a power of 2 + # 1234567 is 00010010|11010110|10000111 in binary + # 00010010 -> 18, 11010110 -> 214, 10000111 -> 135 + (DecimalType(7, 4), b"\x12\xd6\x87", Decimal("123.4567")), + # negative decimal to test two's complement + # -1234567 is 11101101|00101001|01111001 in binary + # 11101101 -> 237, 00101001 -> 41, 01111001 -> 121 + (DecimalType(7, 4), b"\xed)y", Decimal("-123.4567")), + # test empty byte in decimal + # 11 is 00001011 in binary + # 00001011 -> 11 + (DecimalType(10, 3), b"\x0b", Decimal("0.011")), + (DecimalType(4, 2), b"\x04\xd2", Decimal("12.34")), + (FloatType(), b"\x00\x00\x90\xc0", struct.unpack(" None: + """Test round trip conversions of calling `conversions.from_bytes` and then `conversions.to_bytes` on the result""" + value_from_bytes = conversions.from_bytes(primitive_type, b) + assert value_from_bytes == result + + bytes_from_value = conversions.to_bytes(primitive_type, value_from_bytes) + assert bytes_from_value == b + + +@pytest.mark.parametrize( + "primitive_type, v, result", + [ + ( + UUIDType(), + uuid.UUID("f79c3e09-677c-4bbd-a479-3f349cb785e7"), + b"\xf7\x9c>\tg|K\xbd\xa4y?4\x9c\xb7\x85\xe7", + ), + (UUIDType(), uuid.UUID("f79c3e09-677c-4bbd-a479-3f349cb785e7"), b"\xf7\x9c>\tg|K\xbd\xa4y?4\x9c\xb7\x85\xe7"), + ], +) +def test_uuid_to_bytes(primitive_type: PrimitiveType, v: Any, result: bytes) -> None: + bytes_from_value = conversions.to_bytes(primitive_type, v) + assert bytes_from_value == result + + +@pytest.mark.parametrize( + "primitive_type, b, result", + [ + ( + DecimalType(38, 21), + b"\tI\xb0\xf7\x13\xe9\x180s\xb9\x1e~\xa2\xb3j\x83", + Decimal("12345678912345678.123456789123456789123"), + ), + 
(DecimalType(38, 22), b'\tI\xb0\xf7\x13\xe9\x16\xbb\x01/L\xc3+B)"', Decimal("1234567891234567.1234567891234567891234")), + ( + DecimalType(38, 23), + b"\tI\xb0\xf7\x13\xe9\nB\xa1\xad\xe5+3\x15\x9bY", + Decimal("123456789123456.12345678912345678912345"), + ), + ( + DecimalType(38, 24), + b"\tI\xb0\xf7\x13\xe8\xa2\xbb\xe9g\xba\x86w\xd8\x11\x80", + Decimal("12345678912345.123456789123456789123456"), + ), + ( + DecimalType(38, 25), + b"\tI\xb0\xf7\x13\xe5k:\xd2x\xdd\x04\xc8p\xaf\x07", + Decimal("1234567891234.1234567891234567891234567"), + ), + (DecimalType(38, 26), b"\tI\xb0\xf7\x13\xcd\x85\xc5\x0387<8f\xd6N", Decimal("123456789123.12345678912345678912345678")), + (DecimalType(38, 27), b"\tI\xb0\xf7\x131F\xfd\xc7y\xca9|\x04_\x15", Decimal("12345678912.123456789123456789123456789")), + ( + DecimalType(38, 28), + b"\tI\xb0\xf7\x10R\x01r\x11\xda\x08[\x08+\xb6\xd3", + Decimal("1234567891.1234567891234567891234567891"), + ), + ( + DecimalType(38, 29), + b"\tI\xb0\xf7\x13\xe9\x18[7\xc1x\x0b\x91\xb5$@", + Decimal("123456789.12345678912345678912345678912"), + ), + ( + DecimalType(38, 30), + b"\tI\xb0\xed\x1e\xdf\x80\x03G;\x16\x9b\xf1\x13j\x83", + Decimal("12345678.123456789123456789123456789123"), + ), + (DecimalType(38, 31), b'\tI\xb0\x96+\xac)d(p6)\xea\xc2)"', Decimal("1234567.1234567891234567891234567891234")), + ( + DecimalType(38, 32), + b"\tI\xad\xae\xe3h\xe7O\xb5\x14\xbc\xdc+\x95\x9bY", + Decimal("123456.12345678912345678912345678912345"), + ), + ( + DecimalType(38, 33), + b"\tI\x95\x94>5\x93\xde\xb9.\xefS\xb3\xd8\x11\x80", + Decimal("12345.123456789123456789123456789123456"), + ), + ( + DecimalType(38, 34), + b"\tH\xd5\xd7\x90x\xdf\x08\x1a\xf6C\t\x06p\xaf\x07", + Decimal("1234.1234567891234567891234567891234567"), + ), + (DecimalType(38, 35), b"\tCE\x82\x85\xc7Vf$M\x16\x82@f\xd6N", Decimal("123.12345678912345678912345678912345678")), + (DecimalType(21, 16), b"\x06\xb1:\xe3\xc4N\x94\xaf\x07", Decimal("12345.1234567891234567")), + (DecimalType(22, 17), b"B\xecL\xe5\xab\x11\xce\xd6N", Decimal("12345.12345678912345678")), + (DecimalType(23, 18), b"\x02\x9d;\x00\xf8\xae\xb2\x14_\x15", Decimal("12345.123456789123456789")), + (DecimalType(24, 19), b"\x1a$N\t\xb6\xd2\xf4\xcb\xb6\xd3", Decimal("12345.1234567891234567891")), + (DecimalType(25, 20), b"\x01\x05k\x0ca$=\x8f\xf5$@", Decimal("12345.12345678912345678912")), + (DecimalType(26, 21), b"\n6.{\xcbjg\x9f\x93j\x83", Decimal("12345.123456789123456789123")), + (DecimalType(27, 22), b'f\x1d\xd0\xd5\xf2(\x0c;\xc2)"', Decimal("12345.1234567891234567891234")), + (DecimalType(28, 23), b"\x03\xfd*([u\x90zU\x95\x9bY", Decimal("12345.12345678912345678912345")), + (DecimalType(29, 24), b"'\xe3\xa5\x93\x92\x97\xa4\xc7W\xd8\x11\x80", Decimal("12345.123456789123456789123456")), + (DecimalType(30, 25), b"\x01\x8e\xe4w\xc3\xb9\xeco\xc9np\xaf\x07", Decimal("12345.1234567891234567891234567")), + (DecimalType(31, 26), b"\x0f\x94\xec\xad\xa5C<]\xdePf\xd6N", Decimal("12345.12345678912345678912345678")), + ], +) +def test_round_trip_conversion_large_decimals(primitive_type: PrimitiveType, b: bytes, result: Any) -> None: + """Test round trip conversions of calling `conversions.from_bytes` and then `conversions.to_bytes` on the result""" + value_from_bytes = conversions.from_bytes(primitive_type, b) + assert value_from_bytes == result + + bytes_from_value = conversions.to_bytes(primitive_type, value_from_bytes) + assert bytes_from_value == b + + +@pytest.mark.parametrize( + "primitive_type, expected_max_value", + [ + (DecimalType(6, 2), Decimal("9999.99")), + 
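+        # the largest value a decimal(p, s) can represent is (10**p - 1) / 10**s, + # e.g. decimal(6, 2) above tops out at 9999.99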
(DecimalType(10, 10), Decimal(".9999999999")), + (DecimalType(2, 1), Decimal("9.9")), + (DecimalType(38, 37), Decimal("9.9999999999999999999999999999999999999")), + (DecimalType(20, 1), Decimal("9999999999999999999.9")), + ], +) +def test_max_value_round_trip_conversion(primitive_type: DecimalType, expected_max_value: Decimal) -> None: + """Test round trip conversions of maximum DecimalType values""" + b = conversions.to_bytes(primitive_type, expected_max_value) + value_from_bytes = conversions.from_bytes(primitive_type, b) + + assert value_from_bytes == expected_max_value + + +@pytest.mark.parametrize( + "primitive_type, expected_min_value", + [ + (DecimalType(6, 2), Decimal("-9999.99")), + (DecimalType(10, 10), Decimal("-.9999999999")), + (DecimalType(2, 1), Decimal("-9.9")), + (DecimalType(38, 37), Decimal("-9.9999999999999999999999999999999999999")), + (DecimalType(20, 1), Decimal("-9999999999999999999.9")), + ], +) +def test_min_value_round_trip_conversion(primitive_type: DecimalType, expected_min_value: Decimal) -> None: + """Test round trip conversions of minimum DecimalType values""" + b = conversions.to_bytes(primitive_type, expected_min_value) + value_from_bytes = conversions.from_bytes(primitive_type, b) + + assert value_from_bytes == expected_min_value + + +def test_raise_on_unregistered_type() -> None: + """Test raising when a conversion is attempted for a type that has no registered method""" + + class FooUnknownType: + def __repr__(self) -> str: + return "FooUnknownType()" + + with pytest.raises(TypeError) as exc_info: + conversions.partition_to_py(FooUnknownType(), "foo") # type: ignore + assert "Cannot convert 'foo' to unsupported type: FooUnknownType()" in str(exc_info.value) + + with pytest.raises(TypeError) as exc_info: + conversions.to_bytes(FooUnknownType(), "foo") # type: ignore + assert "scale does not match FooUnknownType()" in str(exc_info.value) + + with pytest.raises(TypeError) as exc_info: + conversions.from_bytes(FooUnknownType(), b"foo") # type: ignore + assert "Cannot deserialize bytes, type FooUnknownType() not supported: b'foo'" in str(exc_info.value) + + +@pytest.mark.parametrize( + "primitive_type, value, expected_error_message", + [ + (DecimalType(7, 3), Decimal("123.4567"), "Cannot serialize value, scale of value does not match type decimal(7, 3): 4"), + ( + DecimalType(18, 8), + Decimal("123456789.123456789"), + "Cannot serialize value, scale of value does not match type decimal(18, 8): 9", + ), + ( + DecimalType(36, 34), + Decimal("1.23456789123456789123456789123456789"), + "Cannot serialize value, scale of value does not match type decimal(36, 34): 35", + ), + ( + DecimalType(7, 2), + Decimal("1234567.89"), + "Cannot serialize value, precision of value is greater than precision of type decimal(7, 2): 9", + ), + ( + DecimalType(17, 9), + Decimal("123456789.123456789"), + "Cannot serialize value, precision of value is greater than precision of type decimal(17, 9): 18", + ), + ( + DecimalType(35, 35), + Decimal("1.23456789123456789123456789123456789"), + "Cannot serialize value, precision of value is greater than precision of type decimal(35, 35): 36", + ), + ], +) +def test_raise_on_incorrect_precision_or_scale(primitive_type: DecimalType, value: Decimal, expected_error_message: str) -> None: + with pytest.raises(ValueError) as exc_info: + conversions.to_bytes(primitive_type, value) + + assert expected_error_message in str(exc_info.value) + + +@pytest.mark.parametrize( + "primitive_type, value, expected_bytes", + [ + (TimestampType(), datetime(2023, 
3, 1, 19, 25, 0), b"\x00\xbb\r\xab\xdb\xf5\x05\x00"), + (TimestamptzType(), datetime(2023, 3, 1, 19, 25, 0, tzinfo=timezone.utc), b"\x00\xbb\r\xab\xdb\xf5\x05\x00"), + (DateType(), date(2023, 3, 1), b"\xd9K\x00\x00"), + (TimeType(), time(12, 30, 45, 500000), b"`\xc8\xeb|\n\x00\x00\x00"), + ], +) +def test_datetime_obj_to_bytes(primitive_type: PrimitiveType, value: Union[datetime, date, time], expected_bytes: bytes) -> None: + bytes_from_value = conversions.to_bytes(primitive_type, value) + + assert bytes_from_value == expected_bytes diff --git a/tests/test_integration.py b/tests/test_integration.py new file mode 100644 index 0000000000..297749b1b7 --- /dev/null +++ b/tests/test_integration.py @@ -0,0 +1,366 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint:disable=redefined-outer-name + +import math +import uuid +from urllib.parse import urlparse + +import pyarrow.parquet as pq +import pytest +from pyarrow.fs import S3FileSystem + +from pyiceberg.catalog import Catalog, load_catalog +from pyiceberg.exceptions import NoSuchTableError +from pyiceberg.expressions import ( + And, + EqualTo, + GreaterThanOrEqual, + IsNaN, + LessThan, + NotEqualTo, + NotNaN, +) +from pyiceberg.io.pyarrow import pyarrow_to_schema +from pyiceberg.schema import Schema +from pyiceberg.table import Table +from pyiceberg.types import ( + BooleanType, + IntegerType, + NestedField, + StringType, + TimestampType, +) + + +@pytest.fixture() +def catalog() -> Catalog: + return load_catalog( + "local", + **{ + "type": "rest", + "uri": "http://localhost:8181", + "s3.endpoint": "http://localhost:9000", + "s3.access-key-id": "admin", + "s3.secret-access-key": "password", + }, + ) + + +@pytest.fixture() +def table_test_null_nan(catalog: Catalog) -> Table: + return catalog.load_table("default.test_null_nan") + + +@pytest.fixture() +def table_test_null_nan_rewritten(catalog: Catalog) -> Table: + return catalog.load_table("default.test_null_nan_rewritten") + + +@pytest.fixture() +def table_test_limit(catalog: Catalog) -> Table: + return catalog.load_table("default.test_limit") + + +@pytest.fixture() +def table_test_all_types(catalog: Catalog) -> Table: + return catalog.load_table("default.test_all_types") + + +TABLE_NAME = ("default", "t1") + + +@pytest.fixture() +def table(catalog: Catalog) -> Table: + try: + catalog.drop_table(TABLE_NAME) + except NoSuchTableError: + pass # Just to make sure that the table doesn't exist + + schema = Schema( + NestedField(field_id=1, name="str", field_type=StringType(), required=False), + NestedField(field_id=2, name="int", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="bool", field_type=BooleanType(), required=False), + NestedField(field_id=4, name="datetime", field_type=TimestampType(), required=False), + schema_id=1, + ) + + 
return catalog.create_table(identifier=TABLE_NAME, schema=schema) + + +@pytest.mark.integration +def test_table_properties(table: Table) -> None: + assert table.properties == {} + + with table.transaction() as transaction: + transaction.set_properties(abc="🤪") + + assert table.properties == {"abc": "🤪"} + + with table.transaction() as transaction: + transaction.remove_properties("abc") + + assert table.properties == {} + + table = table.transaction().set_properties(abc="def").commit_transaction() + + assert table.properties == {"abc": "def"} + + table = table.transaction().remove_properties("abc").commit_transaction() + + assert table.properties == {} + + +@pytest.fixture() +def test_positional_mor_deletes(catalog: Catalog) -> Table: + """Table that has positional deletes""" + return catalog.load_table("default.test_positional_mor_deletes") + + +@pytest.fixture() +def test_positional_mor_double_deletes(catalog: Catalog) -> Table: + """Table that has multiple positional deletes""" + return catalog.load_table("default.test_positional_mor_double_deletes") + + +@pytest.mark.integration +def test_pyarrow_nan(table_test_null_nan: Table) -> None: + arrow_table = table_test_null_nan.scan(row_filter=IsNaN("col_numeric"), selected_fields=("idx", "col_numeric")).to_arrow() + assert len(arrow_table) == 1 + assert arrow_table["idx"][0].as_py() == 1 + assert math.isnan(arrow_table["col_numeric"][0].as_py()) + + +@pytest.mark.integration +def test_pyarrow_nan_rewritten(table_test_null_nan_rewritten: Table) -> None: + arrow_table = table_test_null_nan_rewritten.scan( + row_filter=IsNaN("col_numeric"), selected_fields=("idx", "col_numeric") + ).to_arrow() + assert len(arrow_table) == 1 + assert arrow_table["idx"][0].as_py() == 1 + assert math.isnan(arrow_table["col_numeric"][0].as_py()) + + +@pytest.mark.integration +@pytest.mark.skip(reason="Fixing issues with NaN's: https://github.com/apache/arrow/issues/34162") +def test_pyarrow_not_nan_count(table_test_null_nan: Table) -> None: + not_nan = table_test_null_nan.scan(row_filter=NotNaN("col_numeric"), selected_fields=("idx",)).to_arrow() + assert len(not_nan) == 2 + + +@pytest.mark.integration +def test_duckdb_nan(table_test_null_nan_rewritten: Table) -> None: + con = table_test_null_nan_rewritten.scan().to_duckdb("table_test_null_nan") + result = con.query("SELECT idx, col_numeric FROM table_test_null_nan WHERE isnan(col_numeric)").fetchone() + assert result[0] == 1 + assert math.isnan(result[1]) + + +@pytest.mark.integration +def test_pyarrow_limit(table_test_limit: Table) -> None: + limited_result = table_test_limit.scan(selected_fields=("idx",), limit=1).to_arrow() + assert len(limited_result) == 1 + + empty_result = table_test_limit.scan(selected_fields=("idx",), limit=0).to_arrow() + assert len(empty_result) == 0 + + full_result = table_test_limit.scan(selected_fields=("idx",), limit=999).to_arrow() + assert len(full_result) == 10 + + +@pytest.mark.integration +def test_ray_nan(table_test_null_nan_rewritten: Table) -> None: + ray_dataset = table_test_null_nan_rewritten.scan().to_ray() + assert ray_dataset.count() == 3 + assert math.isnan(ray_dataset.take()[0]["col_numeric"]) + + +@pytest.mark.integration +def test_ray_nan_rewritten(table_test_null_nan_rewritten: Table) -> None: + ray_dataset = table_test_null_nan_rewritten.scan( + row_filter=IsNaN("col_numeric"), selected_fields=("idx", "col_numeric") + ).to_ray() + assert ray_dataset.count() == 1 + assert ray_dataset.take()[0]["idx"] == 1 + assert math.isnan(ray_dataset.take()[0]["col_numeric"]) + + 
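+# A minimal sketch of the scan pattern these integration tests exercise, assuming +# the same local REST catalog and MinIO endpoint that the `catalog` fixture above +# configures (illustrative only, not executed by the suite): +# +#     catalog = load_catalog("local", type="rest", uri="http://localhost:8181") +#     table = catalog.load_table("default.test_null_nan") +#     arrow_table = table.scan( +#         row_filter=IsNaN("col_numeric"), +#         selected_fields=("idx", "col_numeric"), +#         limit=10, +#     ).to_arrow() +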
+@pytest.mark.integration +@pytest.mark.skip(reason="Fixing issues with NaN's: https://github.com/apache/arrow/issues/34162") +def test_ray_not_nan_count(table_test_null_nan_rewritten: Table) -> None: + ray_dataset = table_test_null_nan_rewritten.scan(row_filter=NotNaN("col_numeric"), selected_fields=("idx",)).to_ray() + print(ray_dataset.take()) + assert ray_dataset.count() == 2 + + +@pytest.mark.integration +def test_ray_all_types(table_test_all_types: Table) -> None: + ray_dataset = table_test_all_types.scan().to_ray() + pandas_dataframe = table_test_all_types.scan().to_pandas() + assert ray_dataset.count() == pandas_dataframe.shape[0] + assert pandas_dataframe.equals(ray_dataset.to_pandas()) + + +@pytest.mark.integration +def test_pyarrow_to_iceberg_all_types(table_test_all_types: Table) -> None: + fs = S3FileSystem( + **{ + "endpoint_override": "http://localhost:9000", + "access_key": "admin", + "secret_key": "password", + } + ) + data_file_paths = [task.file.file_path for task in table_test_all_types.scan().plan_files()] + for data_file_path in data_file_paths: + uri = urlparse(data_file_path) + with fs.open_input_file(f"{uri.netloc}{uri.path}") as fout: + parquet_schema = pq.read_schema(fout) + stored_iceberg_schema = Schema.model_validate_json(parquet_schema.metadata.get(b"iceberg.schema")) + converted_iceberg_schema = pyarrow_to_schema(parquet_schema) + assert converted_iceberg_schema == stored_iceberg_schema + + +@pytest.mark.integration +def test_pyarrow_deletes(test_positional_mor_deletes: Table) -> None: + # number, letter + # (1, 'a'), + # (2, 'b'), + # (3, 'c'), + # (4, 'd'), + # (5, 'e'), + # (6, 'f'), + # (7, 'g'), + # (8, 'h'), + # (9, 'i'), <- deleted + # (10, 'j'), + # (11, 'k'), + # (12, 'l') + arrow_table = test_positional_mor_deletes.scan().to_arrow() + assert arrow_table["number"].to_pylist() == [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12] + + # Checking the filter + arrow_table = test_positional_mor_deletes.scan( + row_filter=And(GreaterThanOrEqual("letter", "e"), LessThan("letter", "k")) + ).to_arrow() + assert arrow_table["number"].to_pylist() == [5, 6, 7, 8, 10] + + # Testing the combination of a filter and a limit + arrow_table = test_positional_mor_deletes.scan( + row_filter=And(GreaterThanOrEqual("letter", "e"), LessThan("letter", "k")), limit=1 + ).to_arrow() + assert arrow_table["number"].to_pylist() == [5] + + # Testing the slicing of indices + arrow_table = test_positional_mor_deletes.scan(limit=3).to_arrow() + assert arrow_table["number"].to_pylist() == [1, 2, 3] + + +@pytest.mark.integration +def test_pyarrow_deletes_double(test_positional_mor_double_deletes: Table) -> None: + # number, letter + # (1, 'a'), + # (2, 'b'), + # (3, 'c'), + # (4, 'd'), + # (5, 'e'), + # (6, 'f'), <- second delete + # (7, 'g'), + # (8, 'h'), + # (9, 'i'), <- first delete + # (10, 'j'), + # (11, 'k'), + # (12, 'l') + arrow_table = test_positional_mor_double_deletes.scan().to_arrow() + assert arrow_table["number"].to_pylist() == [1, 2, 3, 4, 5, 7, 8, 10, 11, 12] + + # Checking the filter + arrow_table = test_positional_mor_double_deletes.scan( + row_filter=And(GreaterThanOrEqual("letter", "e"), LessThan("letter", "k")) + ).to_arrow() + assert arrow_table["number"].to_pylist() == [5, 7, 8, 10] + + # Testing the combination of a filter and a limit + arrow_table = test_positional_mor_double_deletes.scan( + row_filter=And(GreaterThanOrEqual("letter", "e"), LessThan("letter", "k")), limit=1 + ).to_arrow() + assert arrow_table["number"].to_pylist() == [5] + + # Testing the slicing of 
indices + arrow_table = test_positional_mor_double_deletes.scan(limit=8).to_arrow() + assert arrow_table["number"].to_pylist() == [1, 2, 3, 4, 5, 7, 8, 10] + + +@pytest.mark.integration +def test_partitioned_tables(catalog: Catalog) -> None: + for table_name, predicate in [ + ("test_partitioned_by_identity", "ts >= '2023-03-05T00:00:00+00:00'"), + ("test_partitioned_by_years", "dt >= '2023-03-05'"), + ("test_partitioned_by_months", "dt >= '2023-03-05'"), + ("test_partitioned_by_days", "ts >= '2023-03-05T00:00:00+00:00'"), + ("test_partitioned_by_hours", "ts >= '2023-03-05T00:00:00+00:00'"), + ("test_partitioned_by_truncate", "letter >= 'e'"), + ("test_partitioned_by_bucket", "number >= '5'"), + ]: + table = catalog.load_table(f"default.{table_name}") + arrow_table = table.scan(selected_fields=("number",), row_filter=predicate).to_arrow() + assert set(arrow_table["number"].to_pylist()) == {5, 6, 7, 8, 9, 10, 11, 12}, f"Table {table_name}, predicate {predicate}" + + +@pytest.mark.integration +def test_unpartitioned_uuid_table(catalog: Catalog) -> None: + unpartitioned_uuid = catalog.load_table("default.test_uuid_and_fixed_unpartitioned") + arrow_table_eq = unpartitioned_uuid.scan(row_filter="uuid_col == '102cb62f-e6f8-4eb0-9973-d9b012ff0967'").to_arrow() + assert arrow_table_eq["uuid_col"].to_pylist() == [uuid.UUID("102cb62f-e6f8-4eb0-9973-d9b012ff0967").bytes] + + arrow_table_neq = unpartitioned_uuid.scan( + row_filter="uuid_col != '102cb62f-e6f8-4eb0-9973-d9b012ff0967' and uuid_col != '639cccce-c9d2-494a-a78c-278ab234f024'" + ).to_arrow() + assert arrow_table_neq["uuid_col"].to_pylist() == [ + uuid.UUID("ec33e4b2-a834-4cc3-8c4a-a1d3bfc2f226").bytes, + uuid.UUID("c1b0d8e0-0b0e-4b1e-9b0a-0e0b0d0c0a0b").bytes, + uuid.UUID("923dae77-83d6-47cd-b4b0-d383e64ee57e").bytes, + ] + + +@pytest.mark.integration +def test_unpartitioned_fixed_table(catalog: Catalog) -> None: + fixed_table = catalog.load_table("default.test_uuid_and_fixed_unpartitioned") + arrow_table_eq = fixed_table.scan(row_filter=EqualTo("fixed_col", b"1234567890123456789012345")).to_arrow() + assert arrow_table_eq["fixed_col"].to_pylist() == [b"1234567890123456789012345"] + + arrow_table_neq = fixed_table.scan( + row_filter=And( + NotEqualTo("fixed_col", b"1234567890123456789012345"), NotEqualTo("uuid_col", "c1b0d8e0-0b0e-4b1e-9b0a-0e0b0d0c0a0b") + ) + ).to_arrow() + assert arrow_table_neq["fixed_col"].to_pylist() == [ + b"1231231231231231231231231", + b"12345678901234567ass12345", + b"qweeqwwqq1231231231231111", + ] + + +@pytest.mark.integration +def test_scan_tag(test_positional_mor_deletes: Table) -> None: + arrow_table = test_positional_mor_deletes.scan().use_ref("tag_12").to_arrow() + assert arrow_table["number"].to_pylist() == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] + + +@pytest.mark.integration +def test_scan_branch(test_positional_mor_deletes: Table) -> None: + arrow_table = test_positional_mor_deletes.scan().use_ref("without_5").to_arrow() + assert arrow_table["number"].to_pylist() == [1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12] diff --git a/tests/test_integration_schema.py b/tests/test_integration_schema.py new file mode 100644 index 0000000000..f0ccb1b0e8 --- /dev/null +++ b/tests/test_integration_schema.py @@ -0,0 +1,2471 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint:disable=redefined-outer-name + +import pytest + +from pyiceberg.catalog import Catalog, load_catalog +from pyiceberg.exceptions import CommitFailedException, NoSuchTableError, ValidationError +from pyiceberg.schema import Schema, prune_columns +from pyiceberg.table import Table, UpdateSchema +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + PrimitiveType, + StringType, + StructType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) + + +@pytest.fixture() +def catalog() -> Catalog: + return load_catalog( + "local", + **{ + "type": "rest", + "uri": "http://localhost:8181", + "s3.endpoint": "http://localhost:9000", + "s3.access-key-id": "admin", + "s3.secret-access-key": "password", + }, + ) + + +@pytest.fixture() +def simple_table(catalog: Catalog, table_schema_simple: Schema) -> Table: + return _create_table_with_schema(catalog, table_schema_simple) + + +def _create_table_with_schema(catalog: Catalog, schema: Schema) -> Table: + tbl_name = "default.test_schema_evolution" + try: + catalog.drop_table(tbl_name) + except NoSuchTableError: + pass + return catalog.create_table(identifier=tbl_name, schema=schema) + + +@pytest.mark.integration +def test_add_already_exists(catalog: Catalog, table_schema_nested: Schema) -> None: + table = _create_table_with_schema(catalog, table_schema_nested) + update = UpdateSchema(table) + + with pytest.raises(ValueError) as exc_info: + update.add_column("foo", IntegerType()) + assert "already exists: foo" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + update.add_column(path=("location", "latitude"), field_type=IntegerType()) + assert "already exists: location.latitude" in str(exc_info.value) + + +@pytest.mark.integration +def test_add_to_non_struct_type(catalog: Catalog, table_schema_simple: Schema) -> None: + table = _create_table_with_schema(catalog, table_schema_simple) + update = UpdateSchema(table) + with pytest.raises(ValueError) as exc_info: + update.add_column(path=("foo", "lat"), field_type=IntegerType()) + assert "Cannot add column 'lat' to non-struct type: foo" in str(exc_info.value) + + +@pytest.mark.integration +def test_schema_evolution_nested_field(catalog: Catalog) -> None: + schema = Schema( + NestedField( + field_id=1, + name="foo", + field_type=StructType(NestedField(2, name="bar", field_type=StringType(), required=False)), + required=False, + ), + ) + tbl = _create_table_with_schema(catalog, schema) + + assert tbl.schema() == schema + + with pytest.raises(ValidationError) as exc_info: + with tbl.transaction() as tx: + tx.update_schema().update_column("foo", StringType()).commit() + + assert "Cannot change column type: struct<2: bar: optional string> is not a primitive" in str(exc_info.value) + + +@pytest.mark.integration +def test_schema_evolution_via_transaction(catalog: Catalog) -> 
None: + schema = Schema( + NestedField(field_id=1, name="col_uuid", field_type=UUIDType(), required=False), + NestedField(field_id=2, name="col_fixed", field_type=FixedType(25), required=False), + ) + tbl = _create_table_with_schema(catalog, schema) + + assert tbl.schema() == schema + + with tbl.transaction() as tx: + tx.update_schema().add_column("col_string", StringType()).commit() + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="col_uuid", field_type=UUIDType(), required=False), + NestedField(field_id=2, name="col_fixed", field_type=FixedType(25), required=False), + NestedField(field_id=3, name="col_string", field_type=StringType(), required=False), + ) + + tbl.update_schema().add_column("col_integer", IntegerType()).commit() + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="col_uuid", field_type=UUIDType(), required=False), + NestedField(field_id=2, name="col_fixed", field_type=FixedType(25), required=False), + NestedField(field_id=3, name="col_string", field_type=StringType(), required=False), + NestedField(field_id=4, name="col_integer", field_type=IntegerType(), required=False), + ) + + with pytest.raises(CommitFailedException) as exc_info: + with tbl.transaction() as tx: + # Start a new update + schema_update = tx.update_schema() + + # Do a concurrent update + tbl.update_schema().add_column("col_long", LongType()).commit() + + # stage another update in the transaction + schema_update.add_column("col_double", DoubleType()).commit() + + assert "Requirement failed: current schema changed: expected id 2 != 3" in str(exc_info.value) + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="col_uuid", field_type=UUIDType(), required=False), + NestedField(field_id=2, name="col_fixed", field_type=FixedType(25), required=False), + NestedField(field_id=3, name="col_string", field_type=StringType(), required=False), + NestedField(field_id=4, name="col_integer", field_type=IntegerType(), required=False), + NestedField(field_id=5, name="col_long", field_type=LongType(), required=False), + ) + + +@pytest.mark.integration +def test_schema_evolution_nested(catalog: Catalog) -> None: + nested_schema = Schema( + NestedField( + field_id=1, + name="location_lookup", + field_type=MapType( + key_id=10, + key_type=StringType(), + value_id=11, + value_type=StructType( + NestedField(field_id=110, name="x", field_type=FloatType(), required=False), + NestedField(field_id=111, name="y", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + NestedField( + field_id=2, + name="locations", + field_type=ListType( + element_id=20, + element_type=StructType( + NestedField(field_id=200, name="x", field_type=FloatType(), required=False), + NestedField(field_id=201, name="y", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + NestedField( + field_id=3, + name="person", + field_type=StructType( + NestedField(field_id=30, name="name", field_type=StringType(), required=False), + NestedField(field_id=31, name="age", field_type=IntegerType(), required=True), + ), + required=False, + ), + ) + + tbl = _create_table_with_schema(catalog, nested_schema) + + assert tbl.schema().highest_field_id == 12 + + with tbl.update_schema() as schema_update: + schema_update.add_column(("location_lookup", "z"), FloatType()) + schema_update.add_column(("locations", "z"), FloatType()) + schema_update.add_column(("person", "address"), StringType()) + + assert str(tbl.schema()) == str( + Schema( + NestedField( 
+ field_id=1, + name="location_lookup", + field_type=MapType( + type="map", + key_id=4, + key_type=StringType(), + value_id=5, + value_type=StructType( + NestedField(field_id=6, name="x", field_type=FloatType(), required=False), + NestedField(field_id=7, name="y", field_type=FloatType(), required=False), + NestedField(field_id=13, name="z", field_type=FloatType(), required=False), + ), + value_required=True, + ), + required=True, + ), + NestedField( + field_id=2, + name="locations", + field_type=ListType( + type="list", + element_id=8, + element_type=StructType( + NestedField(field_id=9, name="x", field_type=FloatType(), required=False), + NestedField(field_id=10, name="y", field_type=FloatType(), required=False), + NestedField(field_id=14, name="z", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + NestedField( + field_id=3, + name="person", + field_type=StructType( + NestedField(field_id=11, name="name", field_type=StringType(), required=False), + NestedField(field_id=12, name="age", field_type=IntegerType(), required=True), + NestedField(field_id=15, name="address", field_type=StringType(), required=False), + ), + required=False, + ), + ) + ) + + +schema_nested = Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + NestedField( + field_id=4, + name="qux", + field_type=ListType(type="list", element_id=8, element_type=StringType(), element_required=True), + required=True, + ), + NestedField( + field_id=5, + name="quux", + field_type=MapType( + type="map", + key_id=9, + key_type=StringType(), + value_id=10, + value_type=MapType( + type="map", key_id=11, key_type=StringType(), value_id=12, value_type=IntegerType(), value_required=True + ), + value_required=True, + ), + required=True, + ), + NestedField( + field_id=6, + name="location", + field_type=ListType( + type="list", + element_id=13, + element_type=StructType( + NestedField(field_id=14, name="latitude", field_type=FloatType(), required=False), + NestedField(field_id=15, name="longitude", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + NestedField( + field_id=7, + name="person", + field_type=StructType( + NestedField(field_id=16, name="name", field_type=StringType(), required=False), + NestedField(field_id=17, name="age", field_type=IntegerType(), required=True), + ), + required=False, + ), + identifier_field_ids=[2], +) + + +@pytest.fixture() +def nested_table(catalog: Catalog) -> Table: + return _create_table_with_schema(catalog, schema_nested) + + +@pytest.mark.integration +def test_no_changes(simple_table: Table, table_schema_simple: Schema) -> None: + with simple_table.update_schema() as _: + pass + + assert simple_table.schema() == table_schema_simple + + +@pytest.mark.integration +def test_no_changes_empty_commit(simple_table: Table, table_schema_simple: Schema) -> None: + with simple_table.update_schema() as update: + # No updates, so this should be a noop + update.update_column(path="foo") + + assert simple_table.schema() == table_schema_simple + + +@pytest.mark.integration +def test_delete_field(simple_table: Table) -> None: + with simple_table.update_schema() as schema_update: + schema_update.delete_column("foo") + + assert simple_table.schema() == Schema( + # foo is missing 👍 + NestedField(field_id=2, name="bar", 
field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + identifier_field_ids=[2], + ) + + +@pytest.mark.integration +def test_delete_field_case_insensitive(simple_table: Table) -> None: + with simple_table.update_schema(case_sensitive=False) as schema_update: + schema_update.delete_column("FOO") + + assert simple_table.schema() == Schema( + # foo is missing 👍 + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + identifier_field_ids=[2], + ) + + +@pytest.mark.integration +def test_delete_identifier_fields(simple_table: Table) -> None: + with pytest.raises(ValueError) as exc_info: + with simple_table.update_schema() as schema_update: + schema_update.delete_column("bar") + + assert "Cannot find identifier field bar. In case of deletion, update the identifier fields first." in str(exc_info) + + +@pytest.mark.integration +def test_delete_identifier_fields_nested(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + NestedField( + field_id=2, + name="person", + field_type=StructType( + NestedField(field_id=3, name="name", field_type=StringType(), required=True), + NestedField(field_id=4, name="age", field_type=IntegerType(), required=True), + ), + required=True, + ), + identifier_field_ids=[3], + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.delete_column("person") + + assert "Cannot find identifier field person.name. In case of deletion, update the identifier fields first." in str(exc_info) + + +@pytest.mark.parametrize( + "field", + [ + "foo", + "baz", + "qux", + "quux", + "location", + "location.element.latitude", + "location.element.longitude", + "person", + "person.name", + "person.age", + ], +) +@pytest.mark.integration +def test_deletes(field: str, nested_table: Table) -> None: + with nested_table.update_schema() as schema_update: + schema_update.delete_column(field) + + selected_ids = { + field_id + for field_id in schema_nested.field_ids + if not isinstance(schema_nested.find_field(field_id).field_type, (MapType, ListType)) + and not schema_nested.find_column_name(field_id).startswith(field) # type: ignore + } + expected_schema = prune_columns(schema_nested, selected_ids, select_full_types=False) + + assert expected_schema == nested_table.schema() + + +@pytest.mark.parametrize( + "field", + [ + "Foo", + "Baz", + "Qux", + "Quux", + "Location", + "Location.element.latitude", + "Location.element.longitude", + "Person", + "Person.name", + "Person.age", + ], +) +@pytest.mark.integration +def test_deletes_case_insensitive(field: str, nested_table: Table) -> None: + with nested_table.update_schema(case_sensitive=False) as schema_update: + schema_update.delete_column(field) + + selected_ids = { + field_id + for field_id in schema_nested.field_ids + if not isinstance(schema_nested.find_field(field_id).field_type, (MapType, ListType)) + and not schema_nested.find_column_name(field_id).startswith(field.lower()) # type: ignore + } + expected_schema = prune_columns(schema_nested, selected_ids, select_full_types=False) + + assert expected_schema == nested_table.schema() + + +@pytest.mark.integration +def test_update_types(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="bar", 
field_type=IntegerType(), required=True), + NestedField( + field_id=2, + name="location", + field_type=ListType( + type="list", + element_id=3, + element_type=StructType( + NestedField(field_id=4, name="latitude", field_type=FloatType(), required=False), + NestedField(field_id=5, name="longitude", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.update_column("bar", LongType()) + schema_update.update_column("location.latitude", DoubleType()) + schema_update.update_column("location.longitude", DoubleType()) + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="bar", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="location", + field_type=ListType( + type="list", + element_id=3, + element_type=StructType( + NestedField(field_id=4, name="latitude", field_type=DoubleType(), required=False), + NestedField(field_id=5, name="longitude", field_type=DoubleType(), required=False), + ), + element_required=True, + ), + required=True, + ), + ) + + +@pytest.mark.integration +def test_update_types_case_insensitive(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="bar", field_type=IntegerType(), required=True), + NestedField( + field_id=2, + name="location", + field_type=ListType( + type="list", + element_id=3, + element_type=StructType( + NestedField(field_id=4, name="latitude", field_type=FloatType(), required=False), + NestedField(field_id=5, name="longitude", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + ), + ) + + with tbl.update_schema(case_sensitive=False) as schema_update: + schema_update.update_column("baR", LongType()) + schema_update.update_column("Location.Latitude", DoubleType()) + schema_update.update_column("Location.Longitude", DoubleType()) + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="bar", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="location", + field_type=ListType( + type="list", + element_id=3, + element_type=StructType( + NestedField(field_id=4, name="latitude", field_type=DoubleType(), required=False), + NestedField(field_id=5, name="longitude", field_type=DoubleType(), required=False), + ), + element_required=True, + ), + required=True, + ), + ) + + +allowed_promotions = [ + (StringType(), BinaryType()), + (BinaryType(), StringType()), + (IntegerType(), LongType()), + (FloatType(), DoubleType()), + (DecimalType(9, 2), DecimalType(18, 2)), +] + + +@pytest.mark.parametrize("from_type, to_type", allowed_promotions, ids=str) +@pytest.mark.integration +def test_allowed_updates(from_type: PrimitiveType, to_type: PrimitiveType, catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="bar", field_type=from_type, required=True), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.update_column("bar", to_type) + + assert tbl.schema() == Schema(NestedField(field_id=1, name="bar", field_type=to_type, required=True)) + + +disallowed_promotions_types = [ + BooleanType(), + IntegerType(), + LongType(), + FloatType(), + DoubleType(), + DateType(), + TimeType(), + TimestampType(), + TimestamptzType(), + StringType(), + UUIDType(), + BinaryType(), + FixedType(3), + FixedType(4), + # We'll just allow Decimal promotions right now + # https://github.com/apache/iceberg/issues/8389 + # DecimalType(9, 
2), + # DecimalType(9, 3), + DecimalType(18, 2), +] + + +@pytest.mark.parametrize("from_type", disallowed_promotions_types, ids=str) +@pytest.mark.parametrize("to_type", disallowed_promotions_types, ids=str) +@pytest.mark.integration +def test_disallowed_updates(from_type: PrimitiveType, to_type: PrimitiveType, catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="bar", field_type=from_type, required=True), + ), + ) + + if from_type != to_type and (from_type, to_type) not in allowed_promotions: + with pytest.raises(ValidationError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.update_column("bar", to_type) + + assert str(exc_info.value).startswith("Cannot change column type: bar:") + else: + with tbl.update_schema() as schema_update: + schema_update.update_column("bar", to_type) + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="bar", field_type=to_type, required=True), + ) + + +@pytest.mark.integration +def test_rename_simple(simple_table: Table) -> None: + with simple_table.update_schema() as schema_update: + schema_update.rename_column("foo", "vo") + + assert simple_table.schema() == Schema( + NestedField(field_id=1, name="vo", field_type=StringType(), required=False), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + identifier_field_ids=[2], + ) + + +@pytest.mark.integration +def test_rename_simple_nested(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField( + field_id=1, + name="foo", + field_type=StructType(NestedField(field_id=2, name="bar", field_type=StringType())), + required=True, + ), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.rename_column("foo.bar", "vo") + + assert tbl.schema() == Schema( + NestedField( + field_id=1, + name="foo", + field_type=StructType(NestedField(field_id=2, name="vo", field_type=StringType())), + required=True, + ), + ) + + +@pytest.mark.integration +def test_rename_simple_nested_with_dots(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField( + field_id=1, + name="a.b", + field_type=StructType(NestedField(field_id=2, name="c.d", field_type=StringType())), + required=True, + ), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.rename_column(("a.b", "c.d"), "e.f") + + assert tbl.schema() == Schema( + NestedField( + field_id=1, + name="a.b", + field_type=StructType(NestedField(field_id=2, name="e.f", field_type=StringType())), + required=True, + ), + ) + + +@pytest.mark.integration +def test_rename(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField( + field_id=1, + name="location_lookup", + field_type=MapType( + type="map", + key_id=5, + key_type=StringType(), + value_id=6, + value_type=StructType( + NestedField(field_id=7, name="x", field_type=FloatType(), required=False), + NestedField(field_id=8, name="y", field_type=FloatType(), required=False), + ), + value_required=True, + ), + required=True, + ), + NestedField( + field_id=2, + name="locations", + field_type=ListType( + type="list", + element_id=9, + element_type=StructType( + NestedField(field_id=10, name="x", field_type=FloatType(), required=False), + NestedField(field_id=11, name="y", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + NestedField( + 
field_id=3, + name="person", + field_type=StructType( + NestedField(field_id=12, name="name", field_type=StringType(), required=False), + NestedField(field_id=13, name="leeftijd", field_type=IntegerType(), required=True), + ), + required=False, + ), + NestedField(field_id=4, name="foo", field_type=StringType(), required=True), + identifier_field_ids=[], + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.rename_column("foo", "bar") + schema_update.rename_column("location_lookup.x", "latitude") + schema_update.rename_column("locations.x", "latitude") + schema_update.rename_column("person.leeftijd", "age") + + assert tbl.schema() == Schema( + NestedField( + field_id=1, + name="location_lookup", + field_type=MapType( + type="map", + key_id=5, + key_type=StringType(), + value_id=6, + value_type=StructType( + NestedField(field_id=7, name="latitude", field_type=FloatType(), required=False), + NestedField(field_id=8, name="y", field_type=FloatType(), required=False), + ), + value_required=True, + ), + required=True, + ), + NestedField( + field_id=2, + name="locations", + field_type=ListType( + type="list", + element_id=9, + element_type=StructType( + NestedField(field_id=10, name="latitude", field_type=FloatType(), required=False), + NestedField(field_id=11, name="y", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + NestedField( + field_id=3, + name="person", + field_type=StructType( + NestedField(field_id=12, name="name", field_type=StringType(), required=False), + NestedField(field_id=13, name="age", field_type=IntegerType(), required=True), + ), + required=False, + ), + NestedField(field_id=4, name="bar", field_type=StringType(), required=True), + identifier_field_ids=[], + ) + + +@pytest.mark.integration +def test_rename_case_insensitive(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField( + field_id=1, + name="location_lookup", + field_type=MapType( + type="map", + key_id=5, + key_type=StringType(), + value_id=6, + value_type=StructType( + NestedField(field_id=7, name="x", field_type=FloatType(), required=False), + NestedField(field_id=8, name="y", field_type=FloatType(), required=False), + ), + value_required=True, + ), + required=True, + ), + NestedField( + field_id=2, + name="locations", + field_type=ListType( + type="list", + element_id=9, + element_type=StructType( + NestedField(field_id=10, name="x", field_type=FloatType(), required=False), + NestedField(field_id=11, name="y", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + NestedField( + field_id=3, + name="person", + field_type=StructType( + NestedField(field_id=12, name="name", field_type=StringType(), required=False), + NestedField(field_id=13, name="leeftijd", field_type=IntegerType(), required=True), + ), + required=True, + ), + NestedField(field_id=4, name="foo", field_type=StringType(), required=True), + identifier_field_ids=[13], + ), + ) + + with tbl.update_schema(case_sensitive=False) as schema_update: + schema_update.rename_column("Foo", "bar") + schema_update.rename_column("Location_lookup.X", "latitude") + schema_update.rename_column("Locations.X", "latitude") + schema_update.rename_column("Person.Leeftijd", "age") + + assert tbl.schema() == Schema( + NestedField( + field_id=1, + name="location_lookup", + field_type=MapType( + type="map", + key_id=5, + key_type=StringType(), + value_id=6, + value_type=StructType( + NestedField(field_id=7, name="latitude", 
field_type=FloatType(), required=False), + NestedField(field_id=8, name="y", field_type=FloatType(), required=False), + ), + value_required=True, + ), + required=True, + ), + NestedField( + field_id=2, + name="locations", + field_type=ListType( + type="list", + element_id=9, + element_type=StructType( + NestedField(field_id=10, name="latitude", field_type=FloatType(), required=False), + NestedField(field_id=11, name="y", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + NestedField( + field_id=3, + name="person", + field_type=StructType( + NestedField(field_id=12, name="name", field_type=StringType(), required=False), + NestedField(field_id=13, name="age", field_type=IntegerType(), required=True), + ), + required=True, + ), + NestedField(field_id=4, name="bar", field_type=StringType(), required=True), + identifier_field_ids=[13], + ) + + +@pytest.mark.integration +def test_add_struct(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType()), + ), + ) + + struct = StructType( + NestedField(field_id=3, name="x", field_type=DoubleType(), required=False), + NestedField(field_id=4, name="y", field_type=DoubleType(), required=False), + ) + + with tbl.update_schema() as schema_update: + schema_update.add_column("location", struct) + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="foo", field_type=StringType()), + NestedField(field_id=2, name="location", field_type=struct, required=False), + ) + + +@pytest.mark.integration +def test_add_nested_map_of_structs(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType()), + ), + ) + + map_type_example = MapType( + key_id=1, + value_id=2, + key_type=StructType( + NestedField(field_id=20, name="address", field_type=StringType(), required=True), + NestedField(field_id=21, name="city", field_type=StringType(), required=True), + NestedField(field_id=22, name="state", field_type=StringType(), required=True), + NestedField(field_id=23, name="zip", field_type=IntegerType(), required=True), + ), + value_type=StructType( + NestedField(field_id=9, name="lat", field_type=DoubleType(), required=True), + NestedField(field_id=8, name="long", field_type=DoubleType(), required=False), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.add_column("locations", map_type_example) + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + NestedField( + field_id=2, + name="locations", + field_type=MapType( + type="map", + key_id=3, + key_type=StructType( + NestedField(field_id=5, name="address", field_type=StringType(), required=True), + NestedField(field_id=6, name="city", field_type=StringType(), required=True), + NestedField(field_id=7, name="state", field_type=StringType(), required=True), + NestedField(field_id=8, name="zip", field_type=IntegerType(), required=True), + ), + value_id=4, + value_type=StructType( + NestedField(field_id=9, name="lat", field_type=DoubleType(), required=True), + NestedField(field_id=10, name="long", field_type=DoubleType(), required=False), + ), + value_required=True, + ), + required=False, + ), + ) + + +@pytest.mark.integration +def test_add_nested_list_of_structs(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType()), + ), + ) + + 
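+ # The element and field IDs supplied below are placeholders: add_column reassigns them (element_id=3, fields 4-5 in the assertion), just as in the map test above.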
list_type_examples = ListType( + element_id=1, + element_type=StructType( + NestedField(field_id=9, name="lat", field_type=DoubleType(), required=True), + NestedField(field_id=10, name="long", field_type=DoubleType(), required=False), + ), + element_required=False, + ) + + with tbl.update_schema() as schema_update: + schema_update.add_column("locations", list_type_examples) + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + NestedField( + field_id=2, + name="locations", + field_type=ListType( + type="list", + element_id=3, + element_type=StructType( + NestedField(field_id=4, name="lat", field_type=DoubleType(), required=True), + NestedField(field_id=5, name="long", field_type=DoubleType(), required=False), + ), + element_required=False, + ), + required=False, + ), + ) + + +@pytest.mark.integration +def test_add_required_column(catalog: Catalog) -> None: + schema_ = Schema(NestedField(field_id=1, name="a", field_type=BooleanType(), required=False)) + table = _create_table_with_schema(catalog, schema_) + update = UpdateSchema(table) + with pytest.raises(ValueError) as exc_info: + update.add_column(path="data", field_type=IntegerType(), required=True) + assert "Incompatible change: cannot add required column: data" in str(exc_info.value) + + new_schema = ( + UpdateSchema(table, allow_incompatible_changes=True) # pylint: disable=W0212 + .add_column(path="data", field_type=IntegerType(), required=True) + ._apply() + ) + assert new_schema == Schema( + NestedField(field_id=1, name="a", field_type=BooleanType(), required=False), + NestedField(field_id=2, name="data", field_type=IntegerType(), required=True), + ) + + +@pytest.mark.integration +def test_add_required_column_case_insensitive(catalog: Catalog) -> None: + schema_ = Schema(NestedField(field_id=1, name="id", field_type=BooleanType(), required=False)) + table = _create_table_with_schema(catalog, schema_) + + with pytest.raises(ValueError) as exc_info: + with UpdateSchema(table, allow_incompatible_changes=True) as update: + update.case_sensitive(False).add_column(path="ID", field_type=IntegerType(), required=True) + assert "already exists: ID" in str(exc_info.value) + + new_schema = ( + UpdateSchema(table, allow_incompatible_changes=True) # pylint: disable=W0212 + .add_column(path="ID", field_type=IntegerType(), required=True) + ._apply() + ) + assert new_schema == Schema( + NestedField(field_id=1, name="id", field_type=BooleanType(), required=False), + NestedField(field_id=2, name="ID", field_type=IntegerType(), required=True), + ) + + +@pytest.mark.integration +def test_make_column_optional(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.make_column_optional("foo") + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + ) + + +@pytest.mark.integration +def test_mixed_changes(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=StringType(), required=True), + NestedField(field_id=2, name="data", field_type=StringType(), required=False), + NestedField( + field_id=3, + name="preferences", + field_type=StructType( + NestedField(field_id=8, name="feature1", type=BooleanType(), required=True), + NestedField(field_id=9, name="feature2", type=BooleanType(), 
required=False), + ), + required=False, + ), + NestedField( + field_id=4, + name="locations", + field_type=MapType( + key_id=10, + value_id=11, + key_type=StructType( + NestedField(field_id=20, name="address", field_type=StringType(), required=True), + NestedField(field_id=21, name="city", field_type=StringType(), required=True), + NestedField(field_id=22, name="state", field_type=StringType(), required=True), + NestedField(field_id=23, name="zip", field_type=IntegerType(), required=True), + ), + value_type=StructType( + NestedField(field_id=12, name="lat", field_type=DoubleType(), required=True), + NestedField(field_id=13, name="long", field_type=DoubleType(), required=False), + ), + ), + required=True, + ), + NestedField( + field_id=5, + name="points", + field_type=ListType( + element_id=14, + element_type=StructType( + NestedField(field_id=15, name="x", field_type=LongType(), required=True), + NestedField(field_id=16, name="y", field_type=LongType(), required=True), + ), + ), + required=True, + doc="2-D cartesian points", + ), + NestedField(field_id=6, name="doubles", field_type=ListType(element_id=17, element_type=DoubleType()), required=True), + NestedField( + field_id=7, + name="properties", + field_type=MapType(key_id=18, value_id=19, key_type=StringType(), value_type=StringType()), + required=False, + ), + ), + ) + + with tbl.update_schema(allow_incompatible_changes=True) as schema_update: + schema_update.add_column("toplevel", field_type=DecimalType(9, 2)) + schema_update.add_column(("locations", "alt"), field_type=FloatType()) + schema_update.add_column(("points", "z"), field_type=LongType()) + schema_update.add_column(("points", "t.t"), field_type=LongType(), doc="name with '.'") + schema_update.rename_column("data", "json") + schema_update.rename_column("preferences", "options") + schema_update.rename_column("preferences.feature2", "newfeature") + schema_update.rename_column("locations.lat", "latitude") + schema_update.rename_column("points.x", "X") + schema_update.rename_column("points.y", "y.y") + schema_update.update_column("id", field_type=LongType(), doc="unique id") + schema_update.update_column("locations.lat", DoubleType()) + schema_update.update_column("locations.lat", doc="latitude") + schema_update.delete_column("locations.long") + schema_update.delete_column("properties") + schema_update.make_column_optional("points.x") + schema_update.update_column("data", required=True) + schema_update.add_column(("locations", "description"), StringType(), doc="location description") + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True, doc="unique id"), + NestedField(field_id=2, name="json", field_type=StringType(), required=True), + NestedField( + field_id=3, + name="options", + field_type=StructType( + NestedField(field_id=8, name="feature1", field_type=BooleanType(), required=True), + NestedField(field_id=9, name="newfeature", field_type=BooleanType(), required=False), + ), + required=False, + ), + NestedField( + field_id=4, + name="locations", + field_type=MapType( + type="map", + key_id=10, + key_type=StructType( + NestedField(field_id=12, name="address", field_type=StringType(), required=True), + NestedField(field_id=13, name="city", field_type=StringType(), required=True), + NestedField(field_id=14, name="state", field_type=StringType(), required=True), + NestedField(field_id=15, name="zip", field_type=IntegerType(), required=True), + ), + value_id=11, + value_type=StructType( + NestedField(field_id=16, 
name="latitude", field_type=DoubleType(), required=True, doc="latitude"), + NestedField(field_id=25, name="alt", field_type=FloatType(), required=False), + NestedField( + field_id=28, name="description", field_type=StringType(), required=False, doc="location description" + ), + ), + value_required=True, + ), + required=True, + ), + NestedField( + field_id=5, + name="points", + field_type=ListType( + type="list", + element_id=18, + element_type=StructType( + NestedField(field_id=19, name="X", field_type=LongType(), required=False), + NestedField(field_id=20, name="y.y", field_type=LongType(), required=True), + NestedField(field_id=26, name="z", field_type=LongType(), required=False), + NestedField(field_id=27, name="t.t", field_type=LongType(), required=False, doc="name with '.'"), + ), + element_required=True, + ), + doc="2-D cartesian points", + required=True, + ), + NestedField( + field_id=6, + name="doubles", + field_type=ListType(type="list", element_id=21, element_type=DoubleType(), element_required=True), + required=True, + ), + NestedField(field_id=24, name="toplevel", field_type=DecimalType(precision=9, scale=2), required=False), + ) + + +@pytest.mark.integration +def test_ambiguous_column(catalog: Catalog, table_schema_nested: Schema) -> None: + table = _create_table_with_schema(catalog, table_schema_nested) + update = UpdateSchema(table) + + with pytest.raises(ValueError) as exc_info: + update.add_column(path="location.latitude", field_type=IntegerType()) + assert "Cannot add column with ambiguous name: location.latitude, provide a tuple instead" in str(exc_info.value) + + +@pytest.mark.integration +def test_delete_then_add(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.delete_column("foo") + schema_update.add_column("foo", StringType()) + + assert tbl.schema() == Schema( + NestedField(field_id=2, name="foo", field_type=StringType(), required=False), + ) + + +@pytest.mark.integration +def test_delete_then_add_nested(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField( + field_id=1, + name="preferences", + field_type=StructType( + NestedField(field_id=2, name="feature1", field_type=BooleanType()), + NestedField(field_id=3, name="feature2", field_type=BooleanType()), + ), + required=True, + ), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.delete_column("preferences.feature1") + schema_update.add_column(("preferences", "feature1"), BooleanType()) + + assert tbl.schema() == Schema( + NestedField( + field_id=1, + name="preferences", + field_type=StructType( + NestedField(field_id=3, name="feature2", field_type=BooleanType()), + NestedField(field_id=4, name="feature1", field_type=BooleanType(), required=False), + ), + required=True, + ), + ) + + +@pytest.mark.integration +def test_delete_missing_column(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.delete_column("bar") + + assert "Could not find field with name bar, case_sensitive=True" in str(exc_info.value) + + +@pytest.mark.integration +def test_add_delete_conflict(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + 
NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.add_column("bar", BooleanType()) + schema_update.delete_column("bar") + assert "Could not find field with name bar, case_sensitive=True" in str(exc_info.value) + + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField( + field_id=1, + name="preferences", + field_type=StructType( + NestedField(field_id=2, name="feature1", field_type=BooleanType()), + NestedField(field_id=3, name="feature2", field_type=BooleanType()), + ), + required=True, + ), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.add_column(("preferences", "feature3"), BooleanType()) + schema_update.delete_column("preferences") + assert "Cannot delete a column that has additions: preferences" in str(exc_info.value) + + +@pytest.mark.integration +def test_rename_missing_column(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.rename_column("bar", "fail") + + assert "Could not find field with name bar, case_sensitive=True" in str(exc_info.value) + + +@pytest.mark.integration +def test_rename_missing_conflicts(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.rename_column("foo", "bar") + schema_update.delete_column("foo") + + assert "Cannot delete a column that has updates: foo" in str(exc_info.value) + + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.rename_column("foo", "bar") + schema_update.delete_column("bar") + + assert "Could not find field with name bar, case_sensitive=True" in str(exc_info.value) + + +@pytest.mark.integration +def test_update_missing_column(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.update_column("bar", DateType()) + + assert "Could not find field with name bar, case_sensitive=True" in str(exc_info.value) + + +@pytest.mark.integration +def test_update_delete_conflict(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=IntegerType(), required=True), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.update_column("foo", LongType()) + schema_update.delete_column("foo") + + assert "Cannot delete a column that has updates: foo" in str(exc_info.value) + + +@pytest.mark.integration +def test_delete_update_conflict(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=IntegerType(), required=True), + ), + ) + + with 
pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.delete_column("foo") + schema_update.update_column("foo", LongType()) + + assert "Cannot update a column that will be deleted: foo" in str(exc_info.value) + + +@pytest.mark.integration +def test_delete_map_key(nested_table: Table) -> None: + with pytest.raises(ValueError) as exc_info: + with nested_table.update_schema() as schema_update: + schema_update.delete_column("quux.key") + + assert "Cannot delete map keys" in str(exc_info.value) + + +@pytest.mark.integration +def test_add_field_to_map_key(nested_table: Table) -> None: + with pytest.raises(ValueError) as exc_info: + with nested_table.update_schema() as schema_update: + schema_update.add_column(("quux", "key"), StringType()) + + assert "Cannot add column 'key' to non-struct type: quux" in str(exc_info.value) + + +@pytest.mark.integration +def test_alter_map_key(nested_table: Table) -> None: + with pytest.raises(ValueError) as exc_info: + with nested_table.update_schema() as schema_update: + schema_update.update_column(("quux", "key"), BinaryType()) + + assert "Cannot update map keys" in str(exc_info.value) + + +@pytest.mark.integration +def test_update_map_key(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField( + field_id=1, name="m", field_type=MapType(key_id=2, value_id=3, key_type=IntegerType(), value_type=DoubleType()) + ) + ), + ) + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.update_column("m.key", LongType()) + + assert "Cannot update map keys: map<int, double>" in str(exc_info.value) + + +@pytest.mark.integration +def test_update_added_column_doc(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.add_column("value", LongType()) + schema_update.update_column("value", doc="a value") + + assert "Could not find field with name value, case_sensitive=True" in str(exc_info.value) + + +@pytest.mark.integration +def test_update_deleted_column_doc(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.delete_column("foo") + schema_update.update_column("foo", doc="a value") + + assert "Cannot update a column that will be deleted: foo" in str(exc_info.value) + + +@pytest.mark.integration +def test_multiple_moves(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="a", field_type=IntegerType(), required=True), + NestedField(field_id=2, name="b", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="c", field_type=IntegerType(), required=True), + NestedField(field_id=4, name="d", field_type=IntegerType(), required=True), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.move_first("d") + schema_update.move_first("c") + schema_update.move_after("b", "d") + schema_update.move_before("d", "a") + + assert tbl.schema() == Schema( + NestedField(field_id=3, name="c", field_type=IntegerType(), required=True), + NestedField(field_id=2, name="b", field_type=IntegerType(), required=True), + NestedField(field_id=4,
name="d", field_type=IntegerType(), required=True), + NestedField(field_id=1, name="a", field_type=IntegerType(), required=True), + ) + + +@pytest.mark.integration +def test_move_top_level_column_first(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.move_first("data") + + assert tbl.schema() == Schema( + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + ) + + +@pytest.mark.integration +def test_move_top_level_column_before_first(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.move_before("data", "id") + + assert tbl.schema() == Schema( + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + ) + + +@pytest.mark.integration +def test_move_top_level_column_after_last(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.move_after("id", "data") + + assert tbl.schema() == Schema( + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + ) + + +@pytest.mark.integration +def test_move_nested_field_first(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + ), + required=True, + ), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.move_first("struct.data") + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + ), + required=True, + ), + ) + + +@pytest.mark.integration +def test_move_nested_field_before_first(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + ), + required=True, + ), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.move_before("struct.data", "struct.count") + + assert tbl.schema() == Schema( + NestedField(field_id=1, 
name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + ), + required=True, + ), + ) + + +@pytest.mark.integration +def test_move_nested_field_after_first(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + ), + required=True, + ), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.move_before("struct.data", "struct.count") + + assert str(tbl.schema()) == str( + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + ), + required=True, + ), + ) + ) + + +@pytest.mark.integration +def test_move_nested_field_after(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + NestedField(field_id=5, name="ts", field_type=TimestamptzType(), required=True), + ), + required=True, + ), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.move_after("struct.ts", "struct.count") + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + NestedField(field_id=5, name="ts", field_type=TimestamptzType(), required=True), + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + ), + required=True, + ), + ) + + +@pytest.mark.integration +def test_move_nested_field_before(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + NestedField(field_id=5, name="ts", field_type=TimestamptzType(), required=True), + ), + required=True, + ), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.move_before("struct.ts", "struct.data") + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + NestedField(field_id=5, name="ts", field_type=TimestamptzType(), required=True), + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + ), + required=True, + 
), + ) + + +@pytest.mark.integration +def test_move_map_value_struct_field(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="map", + field_type=MapType( + key_id=3, + value_id=4, + key_type=StringType(), + value_type=StructType( + NestedField(field_id=5, name="ts", field_type=TimestamptzType(), required=True), + NestedField(field_id=6, name="count", field_type=LongType(), required=True), + NestedField(field_id=7, name="data", field_type=StringType(), required=True), + ), + ), + required=True, + ), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.move_before("map.ts", "map.data") + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="map", + field_type=MapType( + key_id=3, + value_id=4, + key_type=StringType(), + value_type=StructType( + NestedField(field_id=6, name="count", field_type=LongType(), required=True), + NestedField(field_id=5, name="ts", field_type=TimestamptzType(), required=True), + NestedField(field_id=7, name="data", field_type=StringType(), required=True), + ), + ), + required=True, + ), + ) + + +@pytest.mark.integration +def test_move_added_top_level_column(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.add_column("ts", TimestamptzType()) + schema_update.move_after("ts", "id") + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField(field_id=3, name="ts", field_type=TimestamptzType(), required=False), + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + ) + + +@pytest.mark.integration +def test_move_added_top_level_column_after_added_column(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.add_column("ts", TimestamptzType()) + schema_update.add_column("count", LongType()) + schema_update.move_after("ts", "id") + schema_update.move_after("count", "ts") + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField(field_id=3, name="ts", field_type=TimestamptzType(), required=False), + NestedField(field_id=4, name="count", field_type=LongType(), required=False), + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + ) + + +@pytest.mark.integration +def test_move_added_nested_struct_field(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + ), + required=True, + ), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.add_column(("struct", "ts"), TimestamptzType()) + 
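+ # The freshly added "ts" (assigned field_id=5 in the assertion below) can be moved within the same update.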
schema_update.move_before("struct.ts", "struct.count") + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=5, name="ts", field_type=TimestamptzType(), required=False), + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + ), + required=True, + ), + ) + + +@pytest.mark.integration +def test_move_added_nested_field_before_added_column(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + ), + required=True, + ), + ), + ) + + with tbl.update_schema() as schema_update: + schema_update.add_column(("struct", "ts"), TimestamptzType()) + schema_update.add_column(("struct", "size"), LongType()) + schema_update.move_before("struct.ts", "struct.count") + schema_update.move_before("struct.size", "struct.ts") + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField( + field_id=2, + name="struct", + field_type=StructType( + NestedField(field_id=6, name="size", field_type=LongType(), required=False), + NestedField(field_id=5, name="ts", field_type=TimestamptzType(), required=False), + NestedField(field_id=3, name="count", field_type=LongType(), required=True), + NestedField(field_id=4, name="data", field_type=StringType(), required=True), + ), + required=True, + ), + ) + + +@pytest.mark.integration +def test_move_self_reference_fails(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType()), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_before("foo", "foo") + assert "Cannot move foo before itself" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_after("foo", "foo") + assert "Cannot move foo after itself" in str(exc_info.value) + + +@pytest.mark.integration +def test_move_missing_column_fails(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType()), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_first("items") + assert "Cannot move missing column: items" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_before("items", "id") + assert "Cannot move missing column: items" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_after("items", "data") + assert "Cannot move missing column: items" in str(exc_info.value) + + +@pytest.mark.integration +def test_move_before_add_fails(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType()), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_first("ts") + 
update.add_column("ts", TimestamptzType()) + assert "Cannot move missing column: ts" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_before("ts", "id") + update.add_column("ts", TimestamptzType()) + assert "Cannot move missing column: ts" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_after("ts", "data") + update.add_column("ts", TimestamptzType()) + assert "Cannot move missing column: ts" in str(exc_info.value) + + +@pytest.mark.integration +def test_move_missing_reference_column_fails(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_before("id", "items") + assert "Cannot move id before missing column: items" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_after("data", "items") + assert "Cannot move data after missing column: items" in str(exc_info.value) + + +@pytest.mark.integration +def test_move_primitive_map_key_fails(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + NestedField( + field_id=3, + name="map", + field_type=MapType(key_id=4, value_id=5, key_type=StringType(), value_type=StringType()), + required=False, + ), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_before("map.key", "map.value") + assert "Cannot move fields in non-struct type: map" in str(exc_info.value) + + +@pytest.mark.integration +def test_move_primitive_map_value_fails(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + NestedField( + field_id=3, + name="map", + field_type=MapType(key_id=4, value_id=5, key_type=StringType(), value_type=StructType()), + required=False, + ), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_before("map.value", "map.key") + assert "Cannot move fields in non-struct type: map>" in str(exc_info.value) + + +@pytest.mark.integration +def test_move_top_level_between_structs_fails(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="a", field_type=IntegerType(), required=True), + NestedField(field_id=2, name="b", field_type=IntegerType(), required=True), + NestedField( + field_id=3, + name="struct", + field_type=StructType( + NestedField(field_id=4, name="x", field_type=IntegerType(), required=True), + NestedField(field_id=5, name="y", field_type=IntegerType(), required=True), + ), + required=False, + ), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_before("a", "struct.x") + assert "Cannot move field a to a different struct" in str(exc_info.value) + + +@pytest.mark.integration +def test_move_between_structs_fails(catalog: Catalog) -> None: + tbl = 
_create_table_with_schema( + catalog, + Schema( + NestedField( + field_id=1, + name="s1", + field_type=StructType( + NestedField(field_id=3, name="a", field_type=IntegerType(), required=True), + NestedField(field_id=4, name="b", field_type=IntegerType(), required=True), + ), + required=False, + ), + NestedField( + field_id=2, + name="s2", + field_type=StructType( + NestedField(field_id=5, name="x", field_type=IntegerType(), required=True), + NestedField(field_id=6, name="y", field_type=IntegerType(), required=True), + ), + required=False, + ), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update: + update.move_before("s2.x", "s1.a") + + assert "Cannot move field s2.x to a different struct" in str(exc_info.value) + + +@pytest.mark.integration +def test_add_existing_identifier_fields(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema(NestedField(field_id=1, name="foo", field_type=StringType(), required=True), identifier_field_ids=[1]), + ) + + with tbl.update_schema() as update_schema: + update_schema.set_identifier_fields("foo") + + assert tbl.schema().identifier_field_names() == {"foo"} + + +@pytest.mark.integration +def test_add_new_identifiers_field_columns(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema(NestedField(field_id=1, name="foo", field_type=StringType(), required=True), identifier_field_ids=[1]), + ) + + with tbl.update_schema(allow_incompatible_changes=True) as update_schema: + update_schema.add_column("new_field", StringType(), required=True) + update_schema.set_identifier_fields("foo", "new_field") + + assert tbl.schema().identifier_field_names() == {"foo", "new_field"} + + +@pytest.mark.integration +def test_add_new_identifiers_field_columns_out_of_order(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema(NestedField(field_id=1, name="foo", field_type=StringType(), required=True), identifier_field_ids=[1]), + ) + + with tbl.update_schema(allow_incompatible_changes=True) as update_schema: + # Set the identifier fields before the column exists to verify out-of-order resolution + update_schema.set_identifier_fields("foo", "new_field") + update_schema.add_column("new_field", StringType(), required=True) + + assert tbl.schema().identifier_field_names() == {"foo", "new_field"} + + +@pytest.mark.integration +def test_add_nested_identifier_field_columns(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema(NestedField(field_id=1, name="foo", field_type=StringType(), required=True), identifier_field_ids=[1]), + ) + + with tbl.update_schema(allow_incompatible_changes=True) as update_schema: + update_schema.add_column( + "required_struct", StructType(NestedField(field_id=3, name="field", type=StringType(), required=True)), required=True + ) + + with tbl.update_schema() as update_schema: + update_schema.set_identifier_fields("required_struct.field") + + assert tbl.schema().identifier_field_names() == {"required_struct.field"} + + +@pytest.mark.integration +def test_add_nested_identifier_field_columns_single_transaction(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema(NestedField(field_id=1, name="foo", field_type=StringType(), required=True), identifier_field_ids=[1]), + ) + + with tbl.update_schema(allow_incompatible_changes=True) as update_schema: + update_schema.add_column( + "new", StructType(NestedField(field_id=3, name="field", type=StringType(), required=True)), required=True + ) + update_schema.set_identifier_fields("new.field") + + assert
tbl.schema().identifier_field_names() == {"new.field"} + + +@pytest.mark.integration +def test_add_nested_nested_identifier_field_columns(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema(NestedField(field_id=1, name="foo", field_type=StringType(), required=True), identifier_field_ids=[1]), + ) + + with tbl.update_schema(allow_incompatible_changes=True) as update_schema: + update_schema.add_column( + "new", + StructType( + NestedField( + field_id=3, + name="field", + type=StructType(NestedField(field_id=4, name="nested", type=StringType(), required=True)), + required=True, + ) + ), + required=True, + ) + update_schema.set_identifier_fields("new.field.nested") + + assert tbl.schema().identifier_field_names() == {"new.field.nested"} + + +@pytest.mark.integration +def test_add_dotted_identifier_field_columns(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema(NestedField(field_id=1, name="foo", field_type=StringType(), required=True), identifier_field_ids=[1]), + ) + + with tbl.update_schema(allow_incompatible_changes=True) as update_schema: + update_schema.add_column(("dot.field",), StringType(), required=True) + update_schema.set_identifier_fields("dot.field") + + assert tbl.schema().identifier_field_names() == {"dot.field"} + + +@pytest.mark.integration +def test_remove_identifier_fields(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema(NestedField(field_id=1, name="foo", field_type=StringType(), required=True), identifier_field_ids=[1]), + ) + + with tbl.update_schema(allow_incompatible_changes=True) as update_schema: + update_schema.add_column(("new_field",), StringType(), required=True) + update_schema.add_column(("new_field2",), StringType(), required=True) + update_schema.set_identifier_fields("foo", "new_field", "new_field2") + + assert tbl.schema().identifier_field_names() == {"foo", "new_field", "new_field2"} + + with tbl.update_schema(allow_incompatible_changes=True) as update_schema: + update_schema.set_identifier_fields() + + assert tbl.schema().identifier_field_names() == set() + + +@pytest.mark.integration +def test_set_identifier_field_fails_schema(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=IntegerType(), required=False), + NestedField(field_id=2, name="float", field_type=FloatType(), required=True), + NestedField(field_id=3, name="double", field_type=DoubleType(), required=True), + identifier_field_ids=[], + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update_schema: + update_schema.set_identifier_fields("id") + + assert "Identifier field 1 invalid: not a required field" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update_schema: + update_schema.set_identifier_fields("float") + + assert "Identifier field 2 invalid: must not be float or double field" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update_schema: + update_schema.set_identifier_fields("double") + + assert "Identifier field 3 invalid: must not be float or double field" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as update_schema: + update_schema.set_identifier_fields("unknown") + + assert "Cannot find identifier field unknown. In case of deletion, update the identifier fields first." 
in str(exc_info.value) + + +@pytest.mark.integration +def test_set_identifier_field_fails(nested_table: Table) -> None: + with pytest.raises(ValueError) as exc_info: + with nested_table.update_schema() as update_schema: + update_schema.set_identifier_fields("location") + + assert "Identifier field 6 invalid: not a primitive type field" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + with nested_table.update_schema() as update_schema: + update_schema.set_identifier_fields("baz") + + assert "Identifier field 3 invalid: not a required field" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + with nested_table.update_schema() as update_schema: + update_schema.set_identifier_fields("person.name") + + assert "Identifier field 16 invalid: not a required field" in str(exc_info.value) + + +@pytest.mark.integration +def test_delete_identifier_field_columns(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema(NestedField(field_id=1, name="foo", field_type=StringType(), required=True), identifier_field_ids=[1]), + ) + + with tbl.update_schema() as schema_update: + schema_update.delete_column("foo") + schema_update.set_identifier_fields() + + tbl = _create_table_with_schema( + catalog, + Schema(NestedField(field_id=1, name="foo", field_type=StringType(), required=True), identifier_field_ids=[1]), + ) + + with tbl.update_schema() as schema_update: + schema_update.set_identifier_fields() + schema_update.delete_column("foo") + + +@pytest.mark.integration +def test_delete_containing_nested_identifier_field_columns_fails(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema(NestedField(field_id=1, name="foo", field_type=StringType(), required=True), identifier_field_ids=[1]), + ) + + with tbl.update_schema(allow_incompatible_changes=True) as schema_update: + schema_update.add_column( + "out", StructType(NestedField(field_id=3, name="nested", field_type=StringType(), required=True)), required=True + ) + schema_update.set_identifier_fields("out.nested") + + assert tbl.schema() == Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=True), + NestedField( + field_id=2, + name="out", + field_type=StructType(NestedField(field_id=3, name="nested", field_type=StringType(), required=True)), + required=True, + ), + identifier_field_ids=[3], + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.update_schema() as schema_update: + schema_update.delete_column("out") + + assert "Cannot find identifier field out.nested. In case of deletion, update the identifier fields first." 
in str(exc_info.value) + + +@pytest.mark.integration +def test_rename_identifier_fields(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema(NestedField(field_id=1, name="foo", field_type=StringType(), required=True), identifier_field_ids=[1]), + ) + + with tbl.update_schema() as schema_update: + schema_update.rename_column("foo", "bar") + + assert tbl.schema().identifier_field_ids == [1] + assert tbl.schema().identifier_field_names() == {"bar"} + + +@pytest.mark.integration +def test_move_identifier_fields(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + identifier_field_ids=[1], + ), + ) + + with tbl.update_schema() as update: + update.move_before("data", "id") + + assert tbl.schema().identifier_field_ids == [1] + assert tbl.schema().identifier_field_names() == {"id"} + + with tbl.update_schema() as update: + update.move_after("id", "data") + + assert tbl.schema().identifier_field_ids == [1] + assert tbl.schema().identifier_field_names() == {"id"} + + with tbl.update_schema() as update: + update.move_first("data") + + assert tbl.schema().identifier_field_ids == [1] + assert tbl.schema().identifier_field_names() == {"id"} + + +@pytest.mark.integration +def test_move_identifier_fields_case_insensitive(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="id", field_type=LongType(), required=True), + NestedField(field_id=2, name="data", field_type=StringType(), required=True), + identifier_field_ids=[1], + ), + ) + + with tbl.update_schema(case_sensitive=False) as update: + update.move_before("DATA", "ID") + + assert tbl.schema().identifier_field_ids == [1] + assert tbl.schema().identifier_field_names() == {"id"} + + with tbl.update_schema(case_sensitive=False) as update: + update.move_after("ID", "DATA") + + assert tbl.schema().identifier_field_ids == [1] + assert tbl.schema().identifier_field_names() == {"id"} + + with tbl.update_schema(case_sensitive=False) as update: + update.move_first("DATA") + + assert tbl.schema().identifier_field_ids == [1] + assert tbl.schema().identifier_field_names() == {"id"} + + +@pytest.mark.integration +def test_two_add_schemas_in_a_single_transaction(catalog: Catalog) -> None: + tbl = _create_table_with_schema( + catalog, + Schema( + NestedField(field_id=1, name="foo", field_type=StringType()), + ), + ) + + with pytest.raises(ValueError) as exc_info: + with tbl.transaction() as tr: + with tr.update_schema() as update: + update.add_column("bar", field_type=StringType()) + with tr.update_schema() as update: + update.add_column("baz", field_type=StringType()) + + assert "Updates in a single commit need to be unique, duplicate: " in str( + exc_info.value + ) diff --git a/tests/test_schema.py b/tests/test_schema.py new file mode 100644 index 0000000000..610298b84a --- /dev/null +++ b/tests/test_schema.py @@ -0,0 +1,790 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from textwrap import dedent +from typing import Any, Dict + +import pytest + +from pyiceberg import schema +from pyiceberg.exceptions import ResolveError +from pyiceberg.expressions import Accessor +from pyiceberg.schema import ( + Schema, + build_position_accessors, + promote, + prune_columns, +) +from pyiceberg.typedef import EMPTY_DICT, StructProtocol +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IcebergType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + StringType, + StructType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) + +TEST_PRIMITIVE_TYPES = [ + BooleanType(), + IntegerType(), + LongType(), + FloatType(), + DoubleType(), + DecimalType(10, 2), + DecimalType(100, 2), + StringType(), + DateType(), + TimeType(), + TimestamptzType(), + TimestampType(), + BinaryType(), + FixedType(16), + FixedType(20), + UUIDType(), +] + + +def test_schema_str(table_schema_simple: Schema) -> None: + """Test casting a schema to a string""" + assert str(table_schema_simple) == dedent( + """\ + table { + 1: foo: optional string + 2: bar: required int + 3: baz: optional boolean + }""" + ) + + +def test_schema_repr_single_field() -> None: + """Test schema representation""" + actual = repr(schema.Schema(NestedField(field_id=1, name="foo", field_type=StringType()), schema_id=1)) + expected = "Schema(NestedField(field_id=1, name='foo', field_type=StringType(), required=True), schema_id=1, identifier_field_ids=[])" + assert expected == actual + + +def test_schema_repr_two_fields() -> None: + """Test schema representation""" + actual = repr( + schema.Schema( + NestedField(field_id=1, name="foo", field_type=StringType()), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=False), + schema_id=1, + ) + ) + expected = "Schema(NestedField(field_id=1, name='foo', field_type=StringType(), required=True), NestedField(field_id=2, name='bar', field_type=IntegerType(), required=False), schema_id=1, identifier_field_ids=[])" + assert expected == actual + + +def test_schema_raise_on_duplicate_names() -> None: + """Test schema representation""" + with pytest.raises(ValueError) as exc_info: + schema.Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + NestedField(field_id=4, name="baz", field_type=BooleanType(), required=False), + schema_id=1, + identifier_field_ids=[2], + ) + + assert "Invalid schema, multiple fields for name baz: 3 and 4" in str(exc_info.value) + + +def test_schema_index_by_id_visitor(table_schema_nested: Schema) -> None: + """Test index_by_id visitor function""" + index = schema.index_by_id(table_schema_nested) + assert index == { + 1: NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + 2: NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + 3: NestedField(field_id=3, name="baz", field_type=BooleanType(), 
required=False), + 4: NestedField( + field_id=4, + name="qux", + field_type=ListType(element_id=5, element_type=StringType(), element_required=True), + required=True, + ), + 5: NestedField(field_id=5, name="element", field_type=StringType(), required=True), + 6: NestedField( + field_id=6, + name="quux", + field_type=MapType( + key_id=7, + key_type=StringType(), + value_id=8, + value_type=MapType(key_id=9, key_type=StringType(), value_id=10, value_type=IntegerType(), value_required=True), + value_required=True, + ), + required=True, + ), + 7: NestedField(field_id=7, name="key", field_type=StringType(), required=True), + 9: NestedField(field_id=9, name="key", field_type=StringType(), required=True), + 8: NestedField( + field_id=8, + name="value", + field_type=MapType(key_id=9, key_type=StringType(), value_id=10, value_type=IntegerType(), value_required=True), + required=True, + ), + 10: NestedField(field_id=10, name="value", field_type=IntegerType(), required=True), + 11: NestedField( + field_id=11, + name="location", + field_type=ListType( + element_id=12, + element_type=StructType( + NestedField(field_id=13, name="latitude", field_type=FloatType(), required=False), + NestedField(field_id=14, name="longitude", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + 12: NestedField( + field_id=12, + name="element", + field_type=StructType( + NestedField(field_id=13, name="latitude", field_type=FloatType(), required=False), + NestedField(field_id=14, name="longitude", field_type=FloatType(), required=False), + ), + required=True, + ), + 13: NestedField(field_id=13, name="latitude", field_type=FloatType(), required=False), + 14: NestedField(field_id=14, name="longitude", field_type=FloatType(), required=False), + 15: NestedField( + field_id=15, + name="person", + field_type=StructType( + NestedField(field_id=16, name="name", field_type=StringType(), required=False), + NestedField(field_id=17, name="age", field_type=IntegerType(), required=True), + ), + required=False, + ), + 16: NestedField(field_id=16, name="name", field_type=StringType(), required=False), + 17: NestedField(field_id=17, name="age", field_type=IntegerType(), required=True), + } + + +def test_schema_index_by_name_visitor(table_schema_nested: Schema) -> None: + """Test index_by_name visitor function""" + table_schema_nested = schema.Schema( + NestedField(field_id=1, name="foo", field_type=StringType(), required=False), + NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), + NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), + NestedField( + field_id=4, + name="qux", + field_type=ListType(element_id=5, element_type=StringType(), element_required=True), + required=True, + ), + NestedField( + field_id=6, + name="quux", + field_type=MapType( + key_id=7, + key_type=StringType(), + value_id=8, + value_type=MapType(key_id=9, key_type=StringType(), value_id=10, value_type=IntegerType(), value_required=True), + value_required=True, + ), + required=True, + ), + NestedField( + field_id=11, + name="location", + field_type=ListType( + element_id=12, + element_type=StructType( + NestedField(field_id=13, name="latitude", field_type=FloatType(), required=False), + NestedField(field_id=14, name="longitude", field_type=FloatType(), required=False), + ), + element_required=True, + ), + required=True, + ), + NestedField( + field_id=15, + name="person", + field_type=StructType( + NestedField(field_id=16, name="name", field_type=StringType(), 
required=False),
+                NestedField(field_id=17, name="age", field_type=IntegerType(), required=True),
+            ),
+            required=False,
+        ),
+        schema_id=1,
+        identifier_field_ids=[2],
+    )
+    index = schema.index_by_name(table_schema_nested)
+    assert index == {
+        "foo": 1,
+        "bar": 2,
+        "baz": 3,
+        "qux": 4,
+        "qux.element": 5,
+        "quux": 6,
+        "quux.key": 7,
+        "quux.value": 8,
+        "quux.value.key": 9,
+        "quux.value.value": 10,
+        "location": 11,
+        "location.element": 12,
+        "location.element.latitude": 13,
+        "location.element.longitude": 14,
+        "location.latitude": 13,
+        "location.longitude": 14,
+        "person": 15,
+        "person.name": 16,
+        "person.age": 17,
+    }
+
+
+def test_schema_find_column_name(table_schema_nested: Schema) -> None:
+    """Test finding a column name using its field ID"""
+    assert table_schema_nested.find_column_name(1) == "foo"
+    assert table_schema_nested.find_column_name(2) == "bar"
+    assert table_schema_nested.find_column_name(3) == "baz"
+    assert table_schema_nested.find_column_name(4) == "qux"
+    assert table_schema_nested.find_column_name(5) == "qux.element"
+    assert table_schema_nested.find_column_name(6) == "quux"
+    assert table_schema_nested.find_column_name(7) == "quux.key"
+    assert table_schema_nested.find_column_name(8) == "quux.value"
+    assert table_schema_nested.find_column_name(9) == "quux.value.key"
+    assert table_schema_nested.find_column_name(10) == "quux.value.value"
+    assert table_schema_nested.find_column_name(11) == "location"
+    assert table_schema_nested.find_column_name(12) == "location.element"
+    assert table_schema_nested.find_column_name(13) == "location.element.latitude"
+    assert table_schema_nested.find_column_name(14) == "location.element.longitude"
+
+
+def test_schema_find_column_name_on_id_not_found(table_schema_nested: Schema) -> None:
+    """Test that None is returned when a field ID cannot be found"""
+    assert table_schema_nested.find_column_name(99) is None
+
+
+def test_schema_find_column_name_by_id(table_schema_simple: Schema) -> None:
+    """Test finding a column name given its field ID"""
+    assert table_schema_simple.find_column_name(1) == "foo"
+    assert table_schema_simple.find_column_name(2) == "bar"
+    assert table_schema_simple.find_column_name(3) == "baz"
+
+
+def test_schema_find_field_by_id(table_schema_simple: Schema) -> None:
+    """Test finding a column using its field ID"""
+    index = schema.index_by_id(table_schema_simple)
+
+    column1 = index[1]
+    assert isinstance(column1, NestedField)
+    assert column1.field_id == 1
+    assert column1.field_type == StringType()
+    assert column1.required is False
+
+    column2 = index[2]
+    assert isinstance(column2, NestedField)
+    assert column2.field_id == 2
+    assert column2.field_type == IntegerType()
+    assert column2.required is True
+
+    column3 = index[3]
+    assert isinstance(column3, NestedField)
+    assert column3.field_id == 3
+    assert column3.field_type == BooleanType()
+    assert column3.required is False
+
+
+def test_schema_find_field_by_id_raise_on_unknown_field(table_schema_simple: Schema) -> None:
+    """Test raising when the field ID is not found among columns"""
+    index = schema.index_by_id(table_schema_simple)
+    with pytest.raises(Exception) as exc_info:
+        _ = index[4]
+    assert str(exc_info.value) == "4"
+
+
+def test_schema_find_field_type_by_id(table_schema_simple: Schema) -> None:
+    """Test retrieving a column's type using its field ID"""
+    index = schema.index_by_id(table_schema_simple)
+    assert index[1] == NestedField(field_id=1, name="foo", field_type=StringType(), required=False)
+    assert index[2] ==
NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True) + assert index[3] == NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False) + + +def test_index_by_id_schema_visitor_raise_on_unregistered_type() -> None: + """Test raising a NotImplementedError when an invalid type is provided to the index_by_id function""" + with pytest.raises(NotImplementedError) as exc_info: + schema.index_by_id("foo") # type: ignore + assert "Cannot visit non-type: foo" in str(exc_info.value) + + +def test_schema_find_field(table_schema_simple: Schema) -> None: + """Test finding a field in a schema""" + assert ( + table_schema_simple.find_field(1) + == table_schema_simple.find_field("foo") + == table_schema_simple.find_field("FOO", case_sensitive=False) + == NestedField(field_id=1, name="foo", field_type=StringType(), required=False) + ) + assert ( + table_schema_simple.find_field(2) + == table_schema_simple.find_field("bar") + == table_schema_simple.find_field("BAR", case_sensitive=False) + == NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True) + ) + assert ( + table_schema_simple.find_field(3) + == table_schema_simple.find_field("baz") + == table_schema_simple.find_field("BAZ", case_sensitive=False) + == NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False) + ) + + +def test_schema_find_type(table_schema_simple: Schema) -> None: + """Test finding the type of a column given its field ID""" + assert ( + table_schema_simple.find_type(1) + == table_schema_simple.find_type("foo") + == table_schema_simple.find_type("FOO", case_sensitive=False) + == StringType() + ) + assert ( + table_schema_simple.find_type(2) + == table_schema_simple.find_type("bar") + == table_schema_simple.find_type("BAR", case_sensitive=False) + == IntegerType() + ) + assert ( + table_schema_simple.find_type(3) + == table_schema_simple.find_type("baz") + == table_schema_simple.find_type("BAZ", case_sensitive=False) + == BooleanType() + ) + + +def test_build_position_accessors(table_schema_nested: Schema) -> None: + accessors = build_position_accessors(table_schema_nested) + assert accessors == { + 1: Accessor(position=0, inner=None), + 2: Accessor(position=1, inner=None), + 3: Accessor(position=2, inner=None), + 4: Accessor(position=3, inner=None), + 6: Accessor(position=4, inner=None), + 11: Accessor(position=5, inner=None), + 16: Accessor(position=6, inner=Accessor(position=0, inner=None)), + 17: Accessor(position=6, inner=Accessor(position=1, inner=None)), + } + + +def test_build_position_accessors_with_struct(table_schema_nested: Schema) -> None: + class TestStruct(StructProtocol): + def __init__(self, pos: Dict[int, Any] = EMPTY_DICT): + self._pos: Dict[int, Any] = pos + + def __setitem__(self, pos: int, value: Any) -> None: + pass + + def __getitem__(self, pos: int) -> Any: + return self._pos[pos] + + accessors = build_position_accessors(table_schema_nested) + container = TestStruct({6: TestStruct({0: "name"})}) + inner_accessor = accessors.get(16) + assert inner_accessor + assert inner_accessor.get(container) == "name" + + +def test_serialize_schema(table_schema_simple: Schema) -> None: + actual = table_schema_simple.model_dump_json() + expected = """{"type":"struct","fields":[{"id":1,"name":"foo","type":"string","required":false},{"id":2,"name":"bar","type":"int","required":true},{"id":3,"name":"baz","type":"boolean","required":false}],"schema-id":1,"identifier-field-ids":[2]}""" + assert actual == expected + + +def 
test_deserialize_schema(table_schema_simple: Schema) -> None:
+    actual = Schema.model_validate_json(
+        """{"type": "struct", "fields": [{"id": 1, "name": "foo", "type": "string", "required": false}, {"id": 2, "name": "bar", "type": "int", "required": true}, {"id": 3, "name": "baz", "type": "boolean", "required": false}], "schema-id": 1, "identifier-field-ids": [2]}"""
+    )
+    expected = table_schema_simple
+    assert actual == expected
+
+
+def test_prune_columns_string(table_schema_nested_with_struct_key_map: Schema) -> None:
+    assert prune_columns(table_schema_nested_with_struct_key_map, {1}, False) == Schema(
+        NestedField(field_id=1, name="foo", field_type=StringType(), required=True), schema_id=1, identifier_field_ids=[1]
+    )
+
+
+def test_prune_columns_string_full(table_schema_nested_with_struct_key_map: Schema) -> None:
+    assert prune_columns(table_schema_nested_with_struct_key_map, {1}, True) == Schema(
+        NestedField(field_id=1, name="foo", field_type=StringType(), required=True),
+        schema_id=1,
+        identifier_field_ids=[1],
+    )
+
+
+def test_prune_columns_list(table_schema_nested: Schema) -> None:
+    assert prune_columns(table_schema_nested, {5}, False) == Schema(
+        NestedField(
+            field_id=4,
+            name="qux",
+            field_type=ListType(type="list", element_id=5, element_type=StringType(), element_required=True),
+            required=True,
+        ),
+        schema_id=1,
+        identifier_field_ids=[],
+    )
+
+
+def test_prune_columns_list_itself(table_schema_nested: Schema) -> None:
+    with pytest.raises(ValueError) as exc_info:
+        assert prune_columns(table_schema_nested, {4}, False)
+    assert "Cannot explicitly project List or Map types, 4:qux of type list<string> was selected" in str(exc_info.value)
+
+
+def test_prune_columns_list_full(table_schema_nested: Schema) -> None:
+    assert prune_columns(table_schema_nested, {5}, True) == Schema(
+        NestedField(
+            field_id=4,
+            name="qux",
+            field_type=ListType(type="list", element_id=5, element_type=StringType(), element_required=True),
+            required=True,
+        ),
+        schema_id=1,
+        identifier_field_ids=[],
+    )
+
+
+def test_prune_columns_map(table_schema_nested: Schema) -> None:
+    assert prune_columns(table_schema_nested, {9}, False) == Schema(
+        NestedField(
+            field_id=6,
+            name="quux",
+            field_type=MapType(
+                type="map",
+                key_id=7,
+                key_type=StringType(),
+                value_id=8,
+                value_type=MapType(
+                    type="map", key_id=9, key_type=StringType(), value_id=10, value_type=IntegerType(), value_required=True
+                ),
+                value_required=True,
+            ),
+            required=True,
+        ),
+        schema_id=1,
+        identifier_field_ids=[],
+    )
+
+
+def test_prune_columns_map_itself(table_schema_nested: Schema) -> None:
+    with pytest.raises(ValueError) as exc_info:
+        assert prune_columns(table_schema_nested, {6}, False)
+    assert "Cannot explicitly project List or Map types, 6:quux of type map<string, map<string, int>> was selected" in str(
+        exc_info.value
+    )
+
+
+def test_prune_columns_map_full(table_schema_nested: Schema) -> None:
+    assert prune_columns(table_schema_nested, {9}, True) == Schema(
+        NestedField(
+            field_id=6,
+            name="quux",
+            field_type=MapType(
+                type="map",
+                key_id=7,
+                key_type=StringType(),
+                value_id=8,
+                value_type=MapType(
+                    type="map", key_id=9, key_type=StringType(), value_id=10, value_type=IntegerType(), value_required=True
+                ),
+                value_required=True,
+            ),
+            required=True,
+        ),
+        schema_id=1,
+        identifier_field_ids=[],
+    )
+
+
+def test_prune_columns_map_key(table_schema_nested: Schema) -> None:
+    assert prune_columns(table_schema_nested, {10}, False) == Schema(
+        NestedField(
+            field_id=6,
+            name="quux",
+            field_type=MapType(
type="map", + key_id=7, + key_type=StringType(), + value_id=8, + value_type=MapType( + type="map", key_id=9, key_type=StringType(), value_id=10, value_type=IntegerType(), value_required=True + ), + value_required=True, + ), + required=True, + ), + schema_id=1, + identifier_field_ids=[], + ) + + +def test_prune_columns_struct(table_schema_nested: Schema) -> None: + assert prune_columns(table_schema_nested, {16}, False) == Schema( + NestedField( + field_id=15, + name="person", + field_type=StructType(NestedField(field_id=16, name="name", field_type=StringType(), required=False)), + required=False, + ), + schema_id=1, + identifier_field_ids=[], + ) + + +def test_prune_columns_struct_full(table_schema_nested: Schema) -> None: + actual = prune_columns(table_schema_nested, {16}, True) + assert actual == Schema( + NestedField( + field_id=15, + name="person", + field_type=StructType(NestedField(field_id=16, name="name", field_type=StringType(), required=False)), + required=False, + ), + schema_id=1, + identifier_field_ids=[], + ) + + +def test_prune_columns_empty_struct() -> None: + schema_empty_struct = Schema( + NestedField( + field_id=15, + name="person", + field_type=StructType(), + required=False, + ) + ) + assert prune_columns(schema_empty_struct, {15}, False) == Schema( + NestedField(field_id=15, name="person", field_type=StructType(), required=False), schema_id=0, identifier_field_ids=[] + ) + + +def test_prune_columns_empty_struct_full() -> None: + schema_empty_struct = Schema( + NestedField( + field_id=15, + name="person", + field_type=StructType(), + required=False, + ) + ) + assert prune_columns(schema_empty_struct, {15}, True) == Schema( + NestedField(field_id=15, name="person", field_type=StructType(), required=False), schema_id=0, identifier_field_ids=[] + ) + + +def test_prune_columns_struct_in_map() -> None: + table_schema_nested = Schema( + NestedField( + field_id=6, + name="id_to_person", + field_type=MapType( + key_id=7, + key_type=IntegerType(), + value_id=8, + value_type=StructType( + NestedField(field_id=10, name="name", field_type=StringType(), required=False), + NestedField(field_id=11, name="age", field_type=IntegerType(), required=True), + ), + value_required=True, + ), + required=True, + ), + schema_id=1, + identifier_field_ids=[], + ) + assert prune_columns(table_schema_nested, {11}, False) == Schema( + NestedField( + field_id=6, + name="id_to_person", + field_type=MapType( + type="map", + key_id=7, + key_type=IntegerType(), + value_id=8, + value_type=StructType(NestedField(field_id=11, name="age", field_type=IntegerType(), required=True)), + value_required=True, + ), + required=True, + ), + schema_id=1, + identifier_field_ids=[], + ) + + +def test_prune_columns_struct_in_map_full() -> None: + table_schema_nested = Schema( + NestedField( + field_id=6, + name="id_to_person", + field_type=MapType( + key_id=7, + key_type=IntegerType(), + value_id=8, + value_type=StructType( + NestedField(field_id=10, name="name", field_type=StringType(), required=False), + NestedField(field_id=11, name="age", field_type=IntegerType(), required=True), + ), + value_required=True, + ), + required=True, + ), + schema_id=1, + identifier_field_ids=[], + ) + assert prune_columns(table_schema_nested, {11}, True) == Schema( + NestedField( + field_id=6, + name="id_to_person", + field_type=MapType( + type="map", + key_id=7, + key_type=IntegerType(), + value_id=8, + value_type=StructType(NestedField(field_id=11, name="age", field_type=IntegerType(), required=True)), + value_required=True, + ), + 
required=True,
+        ),
+        schema_id=1,
+        identifier_field_ids=[],
+    )
+
+
+def test_prune_columns_select_original_schema(table_schema_nested: Schema) -> None:
+    ids = set(range(table_schema_nested.highest_field_id))
+    assert prune_columns(table_schema_nested, ids, True) == table_schema_nested
+
+
+def test_schema_select(table_schema_nested: Schema) -> None:
+    assert table_schema_nested.select("bar", "baz") == Schema(
+        NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True),
+        NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False),
+        schema_id=1,
+        identifier_field_ids=[2],
+    )
+
+
+def test_schema_select_case_insensitive(table_schema_nested: Schema) -> None:
+    assert table_schema_nested.select("BAZ", case_sensitive=False) == Schema(
+        NestedField(field_id=3, name="baz", field_type=BooleanType(), required=False), schema_id=1, identifier_field_ids=[]
+    )
+
+
+def test_schema_select_cant_be_found(table_schema_nested: Schema) -> None:
+    with pytest.raises(ValueError) as exc_info:
+        table_schema_nested.select("BAZ", case_sensitive=True)
+    assert "Could not find column: 'BAZ'" in str(exc_info.value)
+
+
+def should_promote(file_type: IcebergType, read_type: IcebergType) -> bool:
+    # Mirrors the promotions exercised below: int -> long, float -> double,
+    # string <-> binary, decimal precision widening at the same scale, and fixed[16] -> uuid.
+    if isinstance(file_type, IntegerType) and isinstance(read_type, LongType):
+        return True
+    if isinstance(file_type, FloatType) and isinstance(read_type, DoubleType):
+        return True
+    if isinstance(file_type, StringType) and isinstance(read_type, BinaryType):
+        return True
+    if isinstance(file_type, BinaryType) and isinstance(read_type, StringType):
+        return True
+    if isinstance(file_type, DecimalType) and isinstance(read_type, DecimalType):
+        return file_type.precision <= read_type.precision and file_type.scale == read_type.scale
+    if isinstance(file_type, FixedType) and isinstance(read_type, UUIDType) and len(file_type) == 16:
+        return True
+    return False
+
+
+def test_identifier_fields_fails(table_schema_nested_with_struct_key_map: Schema) -> None:
+    with pytest.raises(ValueError) as exc_info:
+        Schema(*table_schema_nested_with_struct_key_map.fields, schema_id=1, identifier_field_ids=[999])
+    assert "Could not find field with id: 999" in str(exc_info.value)
+
+    with pytest.raises(ValueError) as exc_info:
+        Schema(*table_schema_nested_with_struct_key_map.fields, schema_id=1, identifier_field_ids=[11])
+    assert "Identifier field 11 invalid: not a primitive type field" in str(exc_info.value)
+
+    with pytest.raises(ValueError) as exc_info:
+        Schema(*table_schema_nested_with_struct_key_map.fields, schema_id=1, identifier_field_ids=[3])
+    assert "Identifier field 3 invalid: not a required field" in str(exc_info.value)
+
+    with pytest.raises(ValueError) as exc_info:
+        Schema(*table_schema_nested_with_struct_key_map.fields, schema_id=1, identifier_field_ids=[28])
+    assert "Identifier field 28 invalid: must not be float or double field" in str(exc_info.value)
+
+    with pytest.raises(ValueError) as exc_info:
+        Schema(*table_schema_nested_with_struct_key_map.fields, schema_id=1, identifier_field_ids=[29])
+    assert "Identifier field 29 invalid: must not be float or double field" in str(exc_info.value)
+
+    with pytest.raises(ValueError) as exc_info:
+        Schema(*table_schema_nested_with_struct_key_map.fields, schema_id=1, identifier_field_ids=[23])
+    assert (
+        "Cannot add field zip as an identifier field: must not be nested in %s"
+        % table_schema_nested_with_struct_key_map.find_field("location")
+        in str(exc_info.value)
+    )
+
+    with pytest.raises(ValueError) as exc_info:
Schema(*table_schema_nested_with_struct_key_map.fields, schema_id=1, identifier_field_ids=[26]) + assert ( + "Cannot add field x as an identifier field: must not be nested in %s" + % table_schema_nested_with_struct_key_map.find_field("points") + in str(exc_info.value) + ) + + with pytest.raises(ValueError) as exc_info: + Schema(*table_schema_nested_with_struct_key_map.fields, schema_id=1, identifier_field_ids=[17]) + assert ( + "Cannot add field age as an identifier field: must not be nested in an optional field %s" + % table_schema_nested_with_struct_key_map.find_field("person") + in str(exc_info.value) + ) + + +@pytest.mark.parametrize( + "file_type", + TEST_PRIMITIVE_TYPES, +) +@pytest.mark.parametrize( + "read_type", + TEST_PRIMITIVE_TYPES, +) +def test_promotion(file_type: IcebergType, read_type: IcebergType) -> None: + if file_type == read_type: + return + if should_promote(file_type, read_type): + assert promote(file_type, read_type) == read_type + else: + with pytest.raises(ResolveError): + promote(file_type, read_type) diff --git a/tests/test_transforms.py b/tests/test_transforms.py new file mode 100644 index 0000000000..d8a2151752 --- /dev/null +++ b/tests/test_transforms.py @@ -0,0 +1,932 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
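+# The expected values in the tests below follow the Iceberg partition transform spec:
+# bucket[N] applies a 32-bit Murmur3 hash to the value's binary representation and
+# reduces the non-negative hash modulo N, roughly (mmh3.hash(serialized_value) & 0x7FFFFFFF) % N,
+# while the year/month/day/hour transforms produce ordinals relative to the Unix epoch
+# (for example, 17501 days = 2017-12-01, 575 months = 2017-12, 47 years = 2017,
+# and 463249 hours = 2022-11-06T01:00).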
+# pylint: disable=eval-used,protected-access,redefined-outer-name +from datetime import date +from decimal import Decimal +from typing import Any, Callable +from uuid import UUID + +import mmh3 as mmh3 +import pytest +from pydantic import ( + BeforeValidator, + PlainSerializer, + RootModel, + WithJsonSchema, +) +from typing_extensions import Annotated + +from pyiceberg import transforms +from pyiceberg.expressions import ( + BoundEqualTo, + BoundGreaterThan, + BoundGreaterThanOrEqual, + BoundIn, + BoundLessThan, + BoundLessThanOrEqual, + BoundNotIn, + BoundNotNull, + BoundNotStartsWith, + BoundReference, + BoundStartsWith, + EqualTo, + GreaterThanOrEqual, + In, + LessThanOrEqual, + NotIn, + NotNull, + NotStartsWith, + Reference, + StartsWith, +) +from pyiceberg.expressions.literals import ( + DateLiteral, + DecimalLiteral, + TimestampLiteral, + literal, +) +from pyiceberg.schema import Accessor +from pyiceberg.transforms import ( + BucketTransform, + DayTransform, + HourTransform, + IdentityTransform, + MonthTransform, + TimeTransform, + Transform, + TruncateTransform, + UnknownTransform, + VoidTransform, + YearTransform, + parse_transform, +) +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IntegerType, + LongType, + NestedField, + PrimitiveType, + StringType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) +from pyiceberg.utils.datetime import ( + date_str_to_days, + date_to_days, + time_str_to_micros, + timestamp_to_micros, + timestamptz_to_micros, +) + + +@pytest.mark.parametrize( + "test_input,test_type,expected", + [ + (1, IntegerType(), 1392991556), + (34, IntegerType(), 2017239379), + (34, LongType(), 2017239379), + (date_to_days(date(2017, 11, 16)), DateType(), -653330422), + (date_str_to_days("2017-11-16"), DateType(), -653330422), + (time_str_to_micros("22:31:08"), TimeType(), -662762989), + ( + timestamp_to_micros("2017-11-16T22:31:08"), + TimestampType(), + -2047944441, + ), + ( + timestamptz_to_micros("2017-11-16T14:31:08-08:00"), + TimestamptzType(), + -2047944441, + ), + (b"\x00\x01\x02\x03", BinaryType(), -188683207), + (b"\x00\x01\x02\x03", FixedType(4), -188683207), + ("iceberg", StringType(), 1210000089), + (UUID("f79c3e09-677c-4bbd-a479-3f349cb785e7"), UUIDType(), 1488055340), + (b"\xf7\x9c>\tg|K\xbd\xa4y?4\x9c\xb7\x85\xe7", UUIDType(), 1488055340), + ], +) +def test_bucket_hash_values(test_input: Any, test_type: PrimitiveType, expected: Any) -> None: + assert BucketTransform(num_buckets=8).transform(test_type, bucket=False)(test_input) == expected + + +@pytest.mark.parametrize( + "transform,value,expected", + [ + (BucketTransform(100).transform(IntegerType()), 34, 79), + (BucketTransform(100).transform(LongType()), 34, 79), + (BucketTransform(100).transform(DateType()), 17486, 26), + (BucketTransform(100).transform(TimeType()), 81068000000, 59), + (BucketTransform(100).transform(TimestampType()), 1510871468000000, 7), + (BucketTransform(100).transform(DecimalType(9, 2)), Decimal("14.20"), 59), + (BucketTransform(100).transform(StringType()), "iceberg", 89), + ( + BucketTransform(100).transform(UUIDType()), + UUID("f79c3e09-677c-4bbd-a479-3f349cb785e7"), + 40, + ), + ( + BucketTransform(100).transform(UUIDType()), + b"\xf7\x9c>\tg|K\xbd\xa4y?4\x9c\xb7\x85\xe7", + 40, + ), + (BucketTransform(128).transform(FixedType(3)), b"foo", 32), + (BucketTransform(128).transform(BinaryType()), b"\x00\x01\x02\x03", 57), + ], +) +def test_buckets(transform: Callable[[Any], int], value: 
Any, expected: int) -> None: + assert transform(value) == expected + + +@pytest.mark.parametrize( + "type_var", + [ + BinaryType(), + DateType(), + DecimalType(8, 5), + FixedType(8), + IntegerType(), + LongType(), + StringType(), + TimestampType(), + TimestamptzType(), + TimeType(), + UUIDType(), + ], +) +def test_bucket_method(type_var: PrimitiveType) -> None: + bucket_transform = BucketTransform(8) # type: ignore + assert str(bucket_transform) == str(eval(repr(bucket_transform))) + assert bucket_transform.can_transform(type_var) + assert bucket_transform.result_type(type_var) == IntegerType() + assert bucket_transform.num_buckets == 8 + assert bucket_transform.apply(None) is None + assert bucket_transform.to_human_string(type_var, "test") == "test" + + +def test_string_with_surrogate_pair() -> None: + string_with_surrogate_pair = "string with a surrogate pair: 💰" + as_bytes = bytes(string_with_surrogate_pair, "UTF-8") + bucket_transform = BucketTransform(100).transform(StringType(), bucket=False) + assert bucket_transform(string_with_surrogate_pair) == mmh3.hash(as_bytes) + + +@pytest.mark.parametrize( + "date_val,date_transform,expected", + [ + (47, YearTransform(), "2017"), + (575, MonthTransform(), "2017-12"), + (17501, DayTransform(), "2017-12-01"), + ], +) +def test_date_to_human_string(date_val: int, date_transform: Transform[Any, Any], expected: str) -> None: + assert date_transform.to_human_string(DateType(), date_val) == expected + + +@pytest.mark.parametrize( + "date_transform", + [ + YearTransform(), + MonthTransform(), + DayTransform(), + ], +) +def test_none_date_to_human_string(date_transform: TimeTransform[Any]) -> None: + assert date_transform.to_human_string(DateType(), None) == "null" + + +def test_hour_to_human_string() -> None: + assert HourTransform().to_human_string(TimestampType(), None) == "null" + assert HourTransform().to_human_string(TimestampType(), 420042) == "2017-12-01-18" # type: ignore + + +@pytest.mark.parametrize( + "negative_value,time_transform,expected", + [ + (-1, YearTransform(), "1969"), + (-1, MonthTransform(), "1969-12"), + (-1, DayTransform(), "1969-12-31"), + (-1, HourTransform(), "1969-12-31-23"), + ], +) +def test_negative_value_to_human_string(negative_value: int, time_transform: TimeTransform[Any], expected: str) -> None: + assert time_transform.to_human_string(TimestampType(), negative_value) == expected + + +@pytest.mark.parametrize( + "type_var", + [ + DateType(), + TimestampType(), + TimestamptzType(), + ], +) +def test_time_methods(type_var: PrimitiveType) -> None: + assert YearTransform().can_transform(type_var) + assert MonthTransform().can_transform(type_var) + assert DayTransform().can_transform(type_var) + assert YearTransform().preserves_order + assert MonthTransform().preserves_order + assert DayTransform().preserves_order + assert YearTransform().result_type(type_var) == IntegerType() + assert MonthTransform().result_type(type_var) == IntegerType() + assert DayTransform().result_type(type_var) == DateType() + assert YearTransform().dedup_name == "time" + assert MonthTransform().dedup_name == "time" + assert DayTransform().dedup_name == "time" + + +@pytest.mark.parametrize( + "transform,type_var,value,expected", + [ + (DayTransform(), DateType(), 17501, 17501), + (DayTransform(), DateType(), -1, -1), + (MonthTransform(), DateType(), 17501, 575), + (MonthTransform(), DateType(), -1, -1), + (YearTransform(), DateType(), 17501, 47), + (YearTransform(), DateType(), -1, -1), + (YearTransform(), TimestampType(), 1512151975038194, 
47), + (YearTransform(), TimestampType(), -1, -1), + (MonthTransform(), TimestamptzType(), 1512151975038194, 575), + (MonthTransform(), TimestamptzType(), -1, -1), + (DayTransform(), TimestampType(), 1512151975038194, 17501), + (DayTransform(), TimestampType(), -1, -1), + ], +) +def test_time_apply_method(transform: TimeTransform[Any], type_var: PrimitiveType, value: int, expected: int) -> None: + assert transform.transform(type_var)(value) == expected + + +@pytest.mark.parametrize( + "type_var", + [ + TimestampType(), + TimestamptzType(), + ], +) +def test_hour_method(type_var: PrimitiveType) -> None: + assert HourTransform().can_transform(type_var) + assert HourTransform().result_type(type_var) == IntegerType() + assert HourTransform().transform(type_var)(1512151975038194) == 420042 # type: ignore + assert HourTransform().dedup_name == "time" + + +@pytest.mark.parametrize( + "transform,other_transform", + [ + (YearTransform(), MonthTransform()), + (YearTransform(), DayTransform()), + (YearTransform(), HourTransform()), + (MonthTransform(), DayTransform()), + (MonthTransform(), HourTransform()), + (DayTransform(), HourTransform()), + ], +) +def test_satisfies_order_of_method(transform: TimeTransform[Any], other_transform: TimeTransform[Any]) -> None: + assert transform.satisfies_order_of(transform) + assert other_transform.satisfies_order_of(transform) + assert not transform.satisfies_order_of(other_transform) + assert not transform.satisfies_order_of(VoidTransform()) + assert not other_transform.satisfies_order_of(IdentityTransform()) + + +@pytest.mark.parametrize( + "type_var,value,expected", + [ + (LongType(), None, "null"), + (DateType(), 17501, "2017-12-01"), + (TimeType(), 36775038194, "10:12:55.038194"), + (TimestamptzType(), 1512151975038194, "2017-12-01T18:12:55.038194+00:00"), + (TimestampType(), 1512151975038194, "2017-12-01T18:12:55.038194"), + (LongType(), -1234567890000, "-1234567890000"), + (StringType(), "a/b/c=d", "a/b/c=d"), + (DecimalType(9, 2), Decimal("-1.50"), "-1.50"), + (FixedType(100), b"foo", "Zm9v"), + ], +) +def test_identity_human_string(type_var: PrimitiveType, value: Any, expected: str) -> None: + identity = IdentityTransform() # type: ignore + assert identity.to_human_string(type_var, value) == expected + + +@pytest.mark.parametrize( + "type_var", + [ + BinaryType(), + BooleanType(), + DateType(), + DecimalType(8, 2), + DoubleType(), + FixedType(16), + FloatType(), + IntegerType(), + LongType(), + StringType(), + TimestampType(), + TimestamptzType(), + TimeType(), + UUIDType(), + ], +) +def test_identity_method(type_var: PrimitiveType) -> None: + identity_transform = IdentityTransform() # type: ignore + assert str(identity_transform) == str(eval(repr(identity_transform))) + assert identity_transform.can_transform(type_var) + assert identity_transform.result_type(type_var) == type_var + assert identity_transform.transform(type_var)("test") == "test" + + +@pytest.mark.parametrize("type_var", [IntegerType(), LongType()]) +@pytest.mark.parametrize( + "input_var,expected", + [(1, 0), (5, 0), (9, 0), (10, 10), (11, 10), (-1, -10), (-10, -10), (-12, -20)], +) +def test_truncate_integer(type_var: PrimitiveType, input_var: int, expected: int) -> None: + trunc = TruncateTransform(10) # type: ignore + assert trunc.transform(type_var)(input_var) == expected + + +@pytest.mark.parametrize( + "input_var,expected", + [ + (Decimal("12.34"), Decimal("12.30")), + (Decimal("12.30"), Decimal("12.30")), + (Decimal("12.29"), Decimal("12.20")), + (Decimal("0.05"), 
Decimal("0.00")), + (Decimal("-0.05"), Decimal("-0.10")), + ], +) +def test_truncate_decimal(input_var: Decimal, expected: Decimal) -> None: + trunc = TruncateTransform(10) # type: ignore + assert trunc.transform(DecimalType(9, 2))(input_var) == expected + + +@pytest.mark.parametrize("input_var,expected", [("abcdefg", "abcde"), ("abc", "abc")]) +def test_truncate_string(input_var: str, expected: str) -> None: + trunc = TruncateTransform(5) # type: ignore + assert trunc.transform(StringType())(input_var) == expected + + +@pytest.mark.parametrize( + "type_var,value,expected_human_str,expected", + [ + (BinaryType(), b"\x00\x01\x02\x03", "AAECAw==", b"\x00"), + (BinaryType(), bytes("\u2603de", "utf-8"), "4piDZGU=", b"\xe2"), + (DecimalType(8, 5), Decimal("14.21"), "14.21", Decimal("14.21")), + (IntegerType(), 123, "123", 123), + (LongType(), 123, "123", 123), + (StringType(), "foo", "foo", "f"), + (StringType(), "\u2603de", "\u2603de", "\u2603"), + ], +) +def test_truncate_method(type_var: PrimitiveType, value: Any, expected_human_str: str, expected: Any) -> None: + truncate_transform = TruncateTransform(1) # type: ignore + assert str(truncate_transform) == str(eval(repr(truncate_transform))) + assert truncate_transform.can_transform(type_var) + assert truncate_transform.result_type(type_var) == type_var + assert truncate_transform.to_human_string(type_var, value) == expected_human_str + assert truncate_transform.transform(type_var)(value) == expected + assert truncate_transform.to_human_string(type_var, None) == "null" + assert truncate_transform.width == 1 + assert truncate_transform.transform(type_var)(None) is None + assert truncate_transform.preserves_order + assert truncate_transform.satisfies_order_of(truncate_transform) + + +def test_unknown_transform() -> None: + unknown_transform = transforms.UnknownTransform("unknown") # type: ignore + assert str(unknown_transform) == str(eval(repr(unknown_transform))) + with pytest.raises(AttributeError): + unknown_transform.transform(StringType())("test") + assert not unknown_transform.can_transform(FixedType(5)) + assert isinstance(unknown_transform.result_type(BooleanType()), StringType) + + +def test_void_transform() -> None: + void_transform = VoidTransform() # type: ignore + assert void_transform is VoidTransform() + assert void_transform == eval(repr(void_transform)) + assert void_transform.transform(StringType())("test") is None + assert void_transform.can_transform(BooleanType()) + assert isinstance(void_transform.result_type(BooleanType()), BooleanType) + assert not void_transform.preserves_order + assert void_transform.satisfies_order_of(VoidTransform()) + assert not void_transform.satisfies_order_of(BucketTransform(100)) + assert void_transform.to_human_string(StringType(), "test") == "null" + assert void_transform.dedup_name == "void" + + +class FauxModel(RootModel): + root: Annotated[ # type: ignore + Transform, + BeforeValidator(parse_transform), + PlainSerializer(lambda c: str(c), return_type=str), # pylint: disable=W0108 + WithJsonSchema({"type": "string"}, mode="serialization"), + ] + + +def test_bucket_transform_serialize() -> None: + assert BucketTransform(num_buckets=22).model_dump_json() == '"bucket[22]"' + + +def test_bucket_transform_deserialize() -> None: + transform = FauxModel.model_validate_json('"bucket[22]"').root + assert transform == BucketTransform(num_buckets=22) + + +def test_bucket_transform_str() -> None: + assert str(BucketTransform(num_buckets=22)) == "bucket[22]" + + +def test_bucket_transform_repr() -> None: 
+    assert repr(BucketTransform(num_buckets=22)) == "BucketTransform(num_buckets=22)"
+
+
+def test_unknown_transform_serialize() -> None:
+    assert UnknownTransform("unknown").model_dump_json() == '"unknown"'
+
+
+def test_unknown_transform_deserialize() -> None:
+    transform = FauxModel.model_validate_json('"unknown"').root
+    assert transform == UnknownTransform("unknown")
+
+
+def test_unknown_transform_str() -> None:
+    assert str(UnknownTransform("unknown")) == "unknown"
+
+
+def test_unknown_transform_repr() -> None:
+    assert repr(UnknownTransform("unknown")) == "UnknownTransform(transform='unknown')"
+
+
+def test_void_transform_serialize() -> None:
+    assert VoidTransform().model_dump_json() == '"void"'
+
+
+def test_void_transform_deserialize() -> None:
+    transform = FauxModel.model_validate_json('"void"').root
+    assert transform == VoidTransform()
+
+
+def test_void_transform_str() -> None:
+    assert str(VoidTransform()) == "void"
+
+
+def test_void_transform_repr() -> None:
+    assert repr(VoidTransform()) == "VoidTransform()"
+
+
+def test_year_transform_serialize() -> None:
+    assert YearTransform().model_dump_json() == '"year"'
+
+
+def test_year_transform_deserialize() -> None:
+    transform = FauxModel.model_validate_json('"year"').root
+    assert transform == YearTransform()
+
+
+def test_month_transform_serialize() -> None:
+    assert MonthTransform().model_dump_json() == '"month"'
+
+
+def test_month_transform_deserialize() -> None:
+    transform = FauxModel.model_validate_json('"month"').root
+    assert transform == MonthTransform()
+
+
+def test_day_transform_serialize() -> None:
+    assert DayTransform().model_dump_json() == '"day"'
+
+
+def test_day_transform_deserialize() -> None:
+    transform = FauxModel.model_validate_json('"day"').root
+    assert transform == DayTransform()
+
+
+def test_hour_transform_serialize() -> None:
+    assert HourTransform().model_dump_json() == '"hour"'
+
+
+def test_hour_transform_deserialize() -> None:
+    transform = FauxModel.model_validate_json('"hour"').root
+    assert transform == HourTransform()
+
+
+@pytest.mark.parametrize(
+    "transform,transform_str",
+    [
+        (YearTransform(), "year"),
+        (MonthTransform(), "month"),
+        (DayTransform(), "day"),
+        (HourTransform(), "hour"),
+    ],
+)
+def test_datetime_transform_str(transform: TimeTransform[Any], transform_str: str) -> None:
+    assert str(transform) == transform_str
+
+
+@pytest.mark.parametrize(
+    "transform,transform_repr",
+    [
+        (YearTransform(), "YearTransform()"),
+        (MonthTransform(), "MonthTransform()"),
+        (DayTransform(), "DayTransform()"),
+        (HourTransform(), "HourTransform()"),
+    ],
+)
+def test_datetime_transform_repr(transform: TimeTransform[Any], transform_repr: str) -> None:
+    assert repr(transform) == transform_repr
+
+
+@pytest.fixture
+def bound_reference_str() -> BoundReference[str]:
+    return BoundReference(field=NestedField(1, "field", StringType(), required=False), accessor=Accessor(position=0, inner=None))
+
+
+@pytest.fixture
+def bound_reference_date() -> BoundReference[int]:
+    return BoundReference(field=NestedField(1, "field", DateType(), required=False), accessor=Accessor(position=0, inner=None))
+
+
+@pytest.fixture
+def bound_reference_timestamp() -> BoundReference[int]:
+    return BoundReference(
+        field=NestedField(1, "field", TimestampType(), required=False), accessor=Accessor(position=0, inner=None)
+    )
+
+
+@pytest.fixture
+def bound_reference_decimal() -> BoundReference[Decimal]:
+    return BoundReference(
+        field=NestedField(1, "field", DecimalType(8, 2), required=False),
accessor=Accessor(position=0, inner=None)
+    )
+
+
+@pytest.fixture
+def bound_reference_long() -> BoundReference[int]:
+    return BoundReference(
+        field=NestedField(1, "field", LongType(), required=False), accessor=Accessor(position=0, inner=None)
+    )
+
+
+def test_projection_bucket_unary(bound_reference_str: BoundReference[str]) -> None:
+    assert BucketTransform(2).project("name", BoundNotNull(term=bound_reference_str)) == NotNull(term=Reference(name="name"))
+
+
+def test_projection_bucket_literal(bound_reference_str: BoundReference[str]) -> None:
+    assert BucketTransform(2).project("name", BoundEqualTo(term=bound_reference_str, literal=literal("data"))) == EqualTo(
+        term="name", literal=1
+    )
+
+
+def test_projection_bucket_set_same_bucket(bound_reference_str: BoundReference[str]) -> None:
+    assert BucketTransform(2).project(
+        "name", BoundIn(term=bound_reference_str, literals={literal("hello"), literal("world")})
+    ) == EqualTo(term="name", literal=1)
+
+
+def test_projection_bucket_set_in(bound_reference_str: BoundReference[str]) -> None:
+    assert BucketTransform(3).project(
+        "name", BoundIn(term=bound_reference_str, literals={literal("hello"), literal("world")})
+    ) == In(term="name", literals={1, 2})
+
+
+def test_projection_bucket_set_not_in(bound_reference_str: BoundReference[str]) -> None:
+    assert (
+        BucketTransform(3).project("name", BoundNotIn(term=bound_reference_str, literals={literal("hello"), literal("world")}))
+        is None
+    )
+
+
+def test_projection_year_unary(bound_reference_date: BoundReference[int]) -> None:
+    assert YearTransform().project("name", BoundNotNull(term=bound_reference_date)) == NotNull(term="name")
+
+
+def test_projection_year_literal(bound_reference_date: BoundReference[int]) -> None:
+    assert YearTransform().project("name", BoundEqualTo(term=bound_reference_date, literal=DateLiteral(1925))) == EqualTo(
+        term="name", literal=5
+    )
+
+
+def test_projection_year_set_same_year(bound_reference_date: BoundReference[int]) -> None:
+    assert YearTransform().project(
+        "name", BoundIn(term=bound_reference_date, literals={DateLiteral(1925), DateLiteral(1926)})
+    ) == EqualTo(term="name", literal=5)
+
+
+def test_projection_year_set_in(bound_reference_date: BoundReference[int]) -> None:
+    assert YearTransform().project(
+        "name", BoundIn(term=bound_reference_date, literals={DateLiteral(1925), DateLiteral(2925)})
+    ) == In(term="name", literals={8, 5})
+
+
+def test_projection_year_set_not_in(bound_reference_date: BoundReference[int]) -> None:
+    assert (
+        YearTransform().project("name", BoundNotIn(term=bound_reference_date, literals={DateLiteral(1925), DateLiteral(2925)}))
+        is None
+    )
+
+
+def test_projection_month_unary(bound_reference_date: BoundReference[int]) -> None:
+    assert MonthTransform().project("name", BoundNotNull(term=bound_reference_date)) == NotNull(term="name")
+
+
+def test_projection_month_literal(bound_reference_date: BoundReference[int]) -> None:
+    assert MonthTransform().project("name", BoundEqualTo(term=bound_reference_date, literal=DateLiteral(1925))) == EqualTo(
+        term="name", literal=63
+    )
+
+
+def test_projection_month_set_same_month(bound_reference_date: BoundReference[int]) -> None:
+    assert MonthTransform().project(
+        "name", BoundIn(term=bound_reference_date, literals={DateLiteral(1925), DateLiteral(1926)})
+    ) == EqualTo(term="name", literal=63)
+
+
+def test_projection_month_set_in(bound_reference_date: BoundReference[int]) -> None:
+    assert MonthTransform().project(
+        "name", BoundIn(term=bound_reference_date,
literals={DateLiteral(1925), DateLiteral(2925)})
+    ) == In(term="name", literals={96, 63})
+
+
+def test_projection_month_set_not_in(bound_reference_date: BoundReference[int]) -> None:
+    assert (
+        MonthTransform().project("name", BoundNotIn(term=bound_reference_date, literals={DateLiteral(1925), DateLiteral(2925)}))
+        is None
+    )
+
+
+def test_projection_day_unary(bound_reference_timestamp: BoundReference[int]) -> None:
+    assert DayTransform().project("name", BoundNotNull(term=bound_reference_timestamp)) == NotNull(term="name")
+
+
+def test_projection_day_literal(bound_reference_timestamp: BoundReference[int]) -> None:
+    assert DayTransform().project(
+        "name", BoundEqualTo(term=bound_reference_timestamp, literal=TimestampLiteral(1667696874000))
+    ) == EqualTo(term="name", literal=19)
+
+
+def test_projection_day_set_same_day(bound_reference_timestamp: BoundReference[int]) -> None:
+    assert DayTransform().project(
+        "name",
+        BoundIn(term=bound_reference_timestamp, literals={TimestampLiteral(1667696874001), TimestampLiteral(1667696874000)}),
+    ) == EqualTo(term="name", literal=19)
+
+
+def test_projection_day_set_in(bound_reference_timestamp: BoundReference[int]) -> None:
+    assert DayTransform().project(
+        "name",
+        BoundIn(term=bound_reference_timestamp, literals={TimestampLiteral(1667696874001), TimestampLiteral(1567696874000)}),
+    ) == In(term="name", literals={18, 19})
+
+
+def test_projection_day_set_not_in(bound_reference_timestamp: BoundReference[int]) -> None:
+    assert (
+        DayTransform().project(
+            "name",
+            BoundNotIn(term=bound_reference_timestamp, literals={TimestampLiteral(1567696874), TimestampLiteral(1667696874)}),
+        )
+        is None
+    )
+
+
+def test_projection_day_human(bound_reference_date: BoundReference[int]) -> None:
+    date_literal = DateLiteral(17532)
+    assert DayTransform().project("dt", BoundEqualTo(term=bound_reference_date, literal=date_literal)) == EqualTo(
+        term="dt", literal=17532
+    )  # == 2018, 1, 1
+
+    assert DayTransform().project("dt", BoundLessThanOrEqual(term=bound_reference_date, literal=date_literal)) == LessThanOrEqual(
+        term="dt", literal=17532
+    )  # <= 2018, 1, 1
+
+    assert DayTransform().project("dt", BoundLessThan(term=bound_reference_date, literal=date_literal)) == LessThanOrEqual(
+        term="dt", literal=17531
+    )  # <= 2017, 12, 31
+
+    assert DayTransform().project(
+        "dt", BoundGreaterThanOrEqual(term=bound_reference_date, literal=date_literal)
+    ) == GreaterThanOrEqual(
+        term="dt", literal=17532
+    )  # >= 2018, 1, 1
+
+    assert DayTransform().project("dt", BoundGreaterThan(term=bound_reference_date, literal=date_literal)) == GreaterThanOrEqual(
+        term="dt", literal=17533
+    )  # >= 2018, 1, 2
+
+
+def test_projection_hour_unary(bound_reference_timestamp: BoundReference[int]) -> None:
+    assert HourTransform().project("name", BoundNotNull(term=bound_reference_timestamp)) == NotNull(term="name")
+
+
+TIMESTAMP_EXAMPLE = 1667696874000000  # Sun Nov 06 2022 01:07:54 UTC
+HOUR_IN_MICROSECONDS = 60 * 60 * 1000 * 1000
+
+
+def test_projection_hour_literal(bound_reference_timestamp: BoundReference[int]) -> None:
+    assert HourTransform().project(
+        "name", BoundEqualTo(term=bound_reference_timestamp, literal=TimestampLiteral(TIMESTAMP_EXAMPLE))
+    ) == EqualTo(term="name", literal=463249)
+
+
+def test_projection_hour_set_same_hour(bound_reference_timestamp: BoundReference[int]) -> None:
+    assert HourTransform().project(
+        "name",
+        BoundIn(
+            term=bound_reference_timestamp,
+            literals={TimestampLiteral(TIMESTAMP_EXAMPLE + 1), TimestampLiteral(TIMESTAMP_EXAMPLE)},
), + ) == EqualTo(term="name", literal=463249) + + +def test_projection_hour_set_in(bound_reference_timestamp: BoundReference[int]) -> None: + assert HourTransform().project( + "name", + BoundIn( + term=bound_reference_timestamp, + literals={TimestampLiteral(TIMESTAMP_EXAMPLE + HOUR_IN_MICROSECONDS), TimestampLiteral(TIMESTAMP_EXAMPLE)}, + ), + ) == In(term="name", literals={463249, 463250}) + + +def test_projection_hour_set_not_in(bound_reference_timestamp: BoundReference[int]) -> None: + assert ( + HourTransform().project( + "name", + BoundNotIn( + term=bound_reference_timestamp, + literals={TimestampLiteral(TIMESTAMP_EXAMPLE + HOUR_IN_MICROSECONDS), TimestampLiteral(TIMESTAMP_EXAMPLE)}, + ), + ) + is None + ) + + +def test_projection_identity_unary(bound_reference_timestamp: BoundReference[int]) -> None: + assert IdentityTransform().project("name", BoundNotNull(term=bound_reference_timestamp)) == NotNull(term="name") + + +def test_projection_identity_literal(bound_reference_timestamp: BoundReference[int]) -> None: + assert IdentityTransform().project( + "name", BoundEqualTo(term=bound_reference_timestamp, literal=TimestampLiteral(TIMESTAMP_EXAMPLE)) + ) == EqualTo(term="name", literal=TimestampLiteral(TIMESTAMP_EXAMPLE)) + + +def test_projection_identity_set_in(bound_reference_timestamp: BoundReference[int]) -> None: + assert IdentityTransform().project( + "name", + BoundIn( + term=bound_reference_timestamp, + literals={TimestampLiteral(TIMESTAMP_EXAMPLE + HOUR_IN_MICROSECONDS), TimestampLiteral(TIMESTAMP_EXAMPLE)}, + ), + ) == In( + term="name", + literals={TimestampLiteral(TIMESTAMP_EXAMPLE + HOUR_IN_MICROSECONDS), TimestampLiteral(TIMESTAMP_EXAMPLE)}, + ) + + +def test_projection_identity_set_not_in(bound_reference_timestamp: BoundReference[int]) -> None: + assert IdentityTransform().project( + "name", + BoundNotIn( + term=bound_reference_timestamp, + literals={TimestampLiteral(TIMESTAMP_EXAMPLE + HOUR_IN_MICROSECONDS), TimestampLiteral(TIMESTAMP_EXAMPLE)}, + ), + ) == NotIn( + term="name", + literals={TimestampLiteral(TIMESTAMP_EXAMPLE + HOUR_IN_MICROSECONDS), TimestampLiteral(TIMESTAMP_EXAMPLE)}, + ) + + +def test_projection_truncate_string_unary(bound_reference_str: BoundReference[str]) -> None: + assert TruncateTransform(2).project("name", BoundNotNull(term=bound_reference_str)) == NotNull(term="name") + + +def test_projection_truncate_string_literal_eq(bound_reference_str: BoundReference[str]) -> None: + assert TruncateTransform(2).project("name", BoundEqualTo(term=bound_reference_str, literal=literal("data"))) == EqualTo( + term="name", literal=literal("da") + ) + + +def test_projection_truncate_string_literal_gt(bound_reference_str: BoundReference[str]) -> None: + assert TruncateTransform(2).project("name", BoundGreaterThan(term=bound_reference_str, literal=literal("data"))) == EqualTo( + term="name", literal=literal("da") + ) + + +def test_projection_truncate_string_literal_gte(bound_reference_str: BoundReference[str]) -> None: + assert TruncateTransform(2).project( + "name", BoundGreaterThanOrEqual(term=bound_reference_str, literal=literal("data")) + ) == EqualTo(term="name", literal=literal("da")) + + +def test_projection_truncate_string_set_same_result(bound_reference_str: BoundReference[str]) -> None: + assert TruncateTransform(2).project( + "name", BoundIn(term=bound_reference_str, literals={literal("hello"), literal("helloworld")}) + ) == EqualTo(term="name", literal=literal("he")) + + +def test_projection_truncate_string_set_in(bound_reference_str: BoundReference[str]) 
-> None: + assert TruncateTransform(3).project( + "name", BoundIn(term=bound_reference_str, literals={literal("hello"), literal("world")}) + ) == In(term="name", literals={literal("hel"), literal("wor")}) + + +def test_projection_truncate_string_set_not_in(bound_reference_str: BoundReference[str]) -> None: + assert ( + TruncateTransform(3).project("name", BoundNotIn(term=bound_reference_str, literals={literal("hello"), literal("world")})) + is None + ) + + +def test_projection_truncate_decimal_literal_eq(bound_reference_decimal: BoundReference[Decimal]) -> None: + assert TruncateTransform(2).project( + "name", BoundEqualTo(term=bound_reference_decimal, literal=DecimalLiteral(Decimal(19.25))) + ) == EqualTo(term="name", literal=Decimal("19.24")) + + +def test_projection_truncate_decimal_literal_gt(bound_reference_decimal: BoundReference[Decimal]) -> None: + assert TruncateTransform(2).project( + "name", BoundGreaterThan(term=bound_reference_decimal, literal=DecimalLiteral(Decimal(19.25))) + ) == GreaterThanOrEqual(term="name", literal=Decimal("19.26")) + + +def test_projection_truncate_decimal_literal_gte(bound_reference_decimal: BoundReference[Decimal]) -> None: + assert TruncateTransform(2).project( + "name", BoundGreaterThanOrEqual(term=bound_reference_decimal, literal=DecimalLiteral(Decimal(19.25))) + ) == GreaterThanOrEqual(term="name", literal=Decimal("19.24")) + + +def test_projection_truncate_decimal_in(bound_reference_decimal: BoundReference[Decimal]) -> None: + assert TruncateTransform(2).project( + "name", BoundIn(term=bound_reference_decimal, literals={literal(Decimal(19.25)), literal(Decimal(18.15))}) + ) == In( + term="name", + literals={ + Decimal("19.24"), + Decimal("18.14999999999999857891452847979962825775146484374"), + }, + ) + + +def test_projection_truncate_long_literal_eq(bound_reference_decimal: BoundReference[Decimal]) -> None: + assert TruncateTransform(2).project( + "name", BoundEqualTo(term=bound_reference_decimal, literal=DecimalLiteral(Decimal(19.25))) + ) == EqualTo(term="name", literal=Decimal("19.24")) + + +def test_projection_truncate_long_literal_gt(bound_reference_decimal: BoundReference[Decimal]) -> None: + assert TruncateTransform(2).project( + "name", BoundGreaterThan(term=bound_reference_decimal, literal=DecimalLiteral(Decimal(19.25))) + ) == GreaterThanOrEqual(term="name", literal=Decimal("19.26")) + + +def test_projection_truncate_long_literal_gte(bound_reference_decimal: BoundReference[Decimal]) -> None: + assert TruncateTransform(2).project( + "name", BoundGreaterThanOrEqual(term=bound_reference_decimal, literal=DecimalLiteral(Decimal(19.25))) + ) == GreaterThanOrEqual(term="name", literal=Decimal("19.24")) + + +def test_projection_truncate_long_in(bound_reference_decimal: BoundReference[Decimal]) -> None: + assert TruncateTransform(2).project( + "name", BoundIn(term=bound_reference_decimal, literals={DecimalLiteral(Decimal(19.25)), DecimalLiteral(Decimal(18.15))}) + ) == In( + term="name", + literals={ + Decimal("19.24"), + Decimal("18.14999999999999857891452847979962825775146484374"), + }, + ) + + +def test_projection_truncate_string_starts_with(bound_reference_str: BoundReference[str]) -> None: + assert TruncateTransform(2).project( + "name", BoundStartsWith(term=bound_reference_str, literal=literal("hello")) + ) == StartsWith(term="name", literal=literal("he")) + + +def test_projection_truncate_string_not_starts_with(bound_reference_str: BoundReference[str]) -> None: + assert TruncateTransform(2).project( + "name", 
BoundNotStartsWith(term=bound_reference_str, literal=literal("hello")) + ) == NotStartsWith(term="name", literal=literal("he")) diff --git a/tests/test_typedef.py b/tests/test_typedef.py new file mode 100644 index 0000000000..43388addca --- /dev/null +++ b/tests/test_typedef.py @@ -0,0 +1,89 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +import pytest + +from pyiceberg.schema import Schema +from pyiceberg.typedef import FrozenDict, KeyDefaultDict, Record +from pyiceberg.types import ( + IntegerType, + NestedField, + StringType, + StructType, +) + + +def test_setitem_frozendict() -> None: + d = FrozenDict(foo=1, bar=2) + with pytest.raises(AttributeError): + d["foo"] = 3 + + +def test_update_frozendict() -> None: + d = FrozenDict(foo=1, bar=2) + with pytest.raises(AttributeError): + d.update({"yes": 2}) + + +def test_keydefaultdict() -> None: + def one(_: int) -> int: + return 1 + + defaultdict = KeyDefaultDict(one) + assert defaultdict[22] == 1 + + +def test_record_repr(table_schema_simple: Schema) -> None: + r = Record("vo", 1, True, struct=table_schema_simple.as_struct()) + assert repr(r) == "Record[foo='vo', bar=1, baz=True]" + + +def test_named_record() -> None: + r = Record(struct=StructType(NestedField(0, "id", IntegerType()), NestedField(1, "name", StringType()))) + + with pytest.raises(AttributeError): + assert r.id is None # type: ignore + + with pytest.raises(AttributeError): + assert r.name is None # type: ignore + + r[0] = 123 + r[1] = "abc" + + assert r[0] == 123 + assert r[1] == "abc" + + assert r.id == 123 # type: ignore + assert r.name == "abc" # type: ignore + + +def test_record_positional_args() -> None: + r = Record(1, "a", True) + assert repr(r) == "Record[field1=1, field2='a', field3=True]" + + +def test_record_named_args() -> None: + r = Record(foo=1, bar="a", baz=True) + + assert r.foo == 1 # type: ignore + assert r.bar == "a" # type: ignore + assert r.baz is True # type: ignore + + assert r[0] == 1 + assert r[1] == "a" + assert r[2] is True + + assert repr(r) == "Record[foo=1, bar='a', baz=True]" diff --git a/tests/test_types.py b/tests/test_types.py new file mode 100644 index 0000000000..249ee98a6f --- /dev/null +++ b/tests/test_types.py @@ -0,0 +1,615 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=W0123,W0613 +import pickle +from typing import Type + +import pytest + +from pyiceberg.exceptions import ValidationError +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + DoubleType, + FixedType, + FloatType, + IcebergType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + PrimitiveType, + StringType, + StructType, + TimestampType, + TimestamptzType, + TimeType, + UUIDType, +) + +non_parameterized_types = [ + (1, BooleanType), + (2, IntegerType), + (3, LongType), + (4, FloatType), + (5, DoubleType), + (6, DateType), + (7, TimeType), + (8, TimestampType), + (9, TimestamptzType), + (10, StringType), + (11, UUIDType), + (12, BinaryType), +] + + +@pytest.mark.parametrize("input_index, input_type", non_parameterized_types) +def test_repr_primitive_types(input_index: int, input_type: Type[PrimitiveType]) -> None: + assert isinstance(eval(repr(input_type())), input_type) + assert input_type == pickle.loads(pickle.dumps(input_type)) + + +@pytest.mark.parametrize( + "input_type, result", + [ + (BooleanType(), True), + (IntegerType(), True), + (LongType(), True), + (FloatType(), True), + (DoubleType(), True), + (DateType(), True), + (TimeType(), True), + (TimestampType(), True), + (TimestamptzType(), True), + (StringType(), True), + (UUIDType(), True), + (BinaryType(), True), + (DecimalType(32, 3), True), + (FixedType(8), True), + (ListType(1, StringType(), True), False), + ( + MapType(1, StringType(), 2, IntegerType(), False), + False, + ), + ( + StructType( + NestedField(1, "required_field", StringType(), required=False), + NestedField(2, "optional_field", IntegerType(), required=True), + ), + False, + ), + (NestedField(1, "required_field", StringType(), required=False), False), + ], +) +def test_is_primitive(input_type: IcebergType, result: bool) -> None: + assert input_type.is_primitive == result + + +def test_fixed_type() -> None: + type_var = FixedType(length=5) + assert len(type_var) == 5 + assert str(type_var) == "fixed[5]" + assert repr(type_var) == "FixedType(length=5)" + assert str(type_var) == str(eval(repr(type_var))) + assert type_var == FixedType(5) + assert type_var != FixedType(6) + assert type_var == pickle.loads(pickle.dumps(type_var)) + + +def test_decimal_type() -> None: + type_var = DecimalType(precision=9, scale=2) + assert type_var.precision == 9 + assert type_var.scale == 2 + assert str(type_var) == "decimal(9, 2)" + assert repr(type_var) == "DecimalType(precision=9, scale=2)" + assert str(type_var) == str(eval(repr(type_var))) + assert type_var == DecimalType(9, 2) + assert type_var != DecimalType(9, 3) + assert type_var == pickle.loads(pickle.dumps(type_var)) + + +def test_struct_type() -> None: + type_var = StructType( + NestedField(1, "optional_field", IntegerType(), required=True), + NestedField(2, "required_field", FixedType(5), required=False), + NestedField( + 3, + "required_field", + StructType( + NestedField(4, "optional_field", DecimalType(8, 2), required=True), + NestedField(5, "required_field", LongType(), required=False), + ), + required=False, + ), + ) + assert 
len(type_var.fields) == 3 + assert str(type_var) == str(eval(repr(type_var))) + assert type_var == eval(repr(type_var)) + assert type_var != StructType(NestedField(1, "optional_field", IntegerType(), required=True)) + assert type_var == pickle.loads(pickle.dumps(type_var)) + + +def test_list_type() -> None: + type_var = ListType( + 1, + StructType( + NestedField(2, "optional_field", DecimalType(8, 2), required=True), + NestedField(3, "required_field", LongType(), required=False), + ), + False, + ) + assert isinstance(type_var.element_field.field_type, StructType) + assert len(type_var.element_field.field_type.fields) == 2 + assert type_var.element_field.field_id == 1 + assert str(type_var) == str(eval(repr(type_var))) + assert type_var == eval(repr(type_var)) + assert type_var != ListType( + 1, + StructType( + NestedField(2, "optional_field", DecimalType(8, 2), required=True), + ), + True, + ) + assert type_var == pickle.loads(pickle.dumps(type_var)) + + +def test_map_type() -> None: + type_var = MapType(1, DoubleType(), 2, UUIDType(), False) + assert isinstance(type_var.key_field.field_type, DoubleType) + assert type_var.key_field.field_id == 1 + assert isinstance(type_var.value_field.field_type, UUIDType) + assert type_var.value_field.field_id == 2 + assert str(type_var) == str(eval(repr(type_var))) + assert type_var == eval(repr(type_var)) + assert type_var != MapType(1, LongType(), 2, UUIDType(), False) + assert type_var != MapType(1, DoubleType(), 2, StringType(), True) + assert type_var == pickle.loads(pickle.dumps(type_var)) + + +def test_nested_field() -> None: + field_var = NestedField( + 1, + "optional_field1", + StructType( + NestedField( + 2, + "optional_field2", + ListType( + 3, + DoubleType(), + element_required=False, + ), + required=True, + ), + ), + required=True, + ) + assert field_var.required + assert not field_var.optional + assert field_var.field_id == 1 + assert isinstance(field_var.field_type, StructType) + assert str(field_var) == str(eval(repr(field_var))) + assert field_var == pickle.loads(pickle.dumps(field_var)) + + +@pytest.mark.parametrize("input_index,input_type", non_parameterized_types) +@pytest.mark.parametrize("check_index,check_type", non_parameterized_types) +def test_non_parameterized_type_equality( + input_index: int, input_type: Type[PrimitiveType], check_index: int, check_type: Type[PrimitiveType] +) -> None: + if input_index == check_index: + assert input_type() == check_type() + else: + assert input_type() != check_type() + + +# Examples based on https://iceberg.apache.org/spec/#appendix-c-json-serialization +def test_serialization_boolean() -> None: + assert BooleanType().model_dump_json() == '"boolean"' + + +def test_deserialization_boolean() -> None: + assert BooleanType.model_validate_json('"boolean"') == BooleanType() + + +def test_str_boolean() -> None: + assert str(BooleanType()) == "boolean" + + +def test_repr_boolean() -> None: + assert repr(BooleanType()) == "BooleanType()" + + +def test_serialization_int() -> None: + assert IntegerType().model_dump_json() == '"int"' + + +def test_deserialization_int() -> None: + assert IntegerType.model_validate_json('"int"') == IntegerType() + + +def test_str_int() -> None: + assert str(IntegerType()) == "int" + + +def test_repr_int() -> None: + assert repr(IntegerType()) == "IntegerType()" + + +def test_serialization_long() -> None: + assert LongType().model_dump_json() == '"long"' + + +def test_deserialization_long() -> None: + assert LongType.model_validate_json('"long"') == LongType() + + +def 
test_str_long() -> None: + assert str(LongType()) == "long" + + +def test_repr_long() -> None: + assert repr(LongType()) == "LongType()" + + +def test_serialization_float() -> None: + assert FloatType().model_dump_json() == '"float"' + + +def test_deserialization_float() -> None: + assert FloatType.model_validate_json('"float"') == FloatType() + + +def test_str_float() -> None: + assert str(FloatType()) == "float" + + +def test_repr_float() -> None: + assert repr(FloatType()) == "FloatType()" + + +def test_serialization_double() -> None: + assert DoubleType().model_dump_json() == '"double"' + + +def test_deserialization_double() -> None: + assert DoubleType.model_validate_json('"double"') == DoubleType() + + +def test_str_double() -> None: + assert str(DoubleType()) == "double" + + +def test_repr_double() -> None: + assert repr(DoubleType()) == "DoubleType()" + + +def test_serialization_date() -> None: + assert DateType().model_dump_json() == '"date"' + + +def test_deserialization_date() -> None: + assert DateType.model_validate_json('"date"') == DateType() + + +def test_str_date() -> None: + assert str(DateType()) == "date" + + +def test_repr_date() -> None: + assert repr(DateType()) == "DateType()" + + +def test_serialization_time() -> None: + assert TimeType().model_dump_json() == '"time"' + + +def test_deserialization_time() -> None: + assert TimeType.model_validate_json('"time"') == TimeType() + + +def test_str_time() -> None: + assert str(TimeType()) == "time" + + +def test_repr_time() -> None: + assert repr(TimeType()) == "TimeType()" + + +def test_serialization_timestamp() -> None: + assert TimestampType().model_dump_json() == '"timestamp"' + + +def test_deserialization_timestamp() -> None: + assert TimestampType.model_validate_json('"timestamp"') == TimestampType() + + +def test_str_timestamp() -> None: + assert str(TimestampType()) == "timestamp" + + +def test_repr_timestamp() -> None: + assert repr(TimestampType()) == "TimestampType()" + + +def test_serialization_timestamptz() -> None: + assert TimestamptzType().model_dump_json() == '"timestamptz"' + + +def test_deserialization_timestamptz() -> None: + assert TimestamptzType.model_validate_json('"timestamptz"') == TimestamptzType() + + +def test_str_timestamptz() -> None: + assert str(TimestamptzType()) == "timestamptz" + + +def test_repr_timestamptz() -> None: + assert repr(TimestamptzType()) == "TimestamptzType()" + + +def test_serialization_string() -> None: + assert StringType().model_dump_json() == '"string"' + + +def test_deserialization_string() -> None: + assert StringType.model_validate_json('"string"') == StringType() + + +def test_str_string() -> None: + assert str(StringType()) == "string" + + +def test_repr_string() -> None: + assert repr(StringType()) == "StringType()" + + +def test_serialization_uuid() -> None: + assert UUIDType().model_dump_json() == '"uuid"' + + +def test_deserialization_uuid() -> None: + assert UUIDType.model_validate_json('"uuid"') == UUIDType() + + +def test_str_uuid() -> None: + assert str(UUIDType()) == "uuid" + + +def test_repr_uuid() -> None: + assert repr(UUIDType()) == "UUIDType()" + + +def test_serialization_fixed() -> None: + assert FixedType(22).model_dump_json() == '"fixed[22]"' + + +def test_deserialization_fixed() -> None: + fixed = FixedType.model_validate_json('"fixed[22]"') + assert fixed == FixedType(22) + assert len(fixed) == 22 + + +def test_deserialization_fixed_failure() -> None: + with pytest.raises(ValidationError) as exc_info: + _ = 
FixedType.model_validate_json('"fixed[abc]"') + + assert "Could not match fixed[abc], expected format fixed[22]" in str(exc_info.value) + + +def test_str_fixed() -> None: + assert str(FixedType(22)) == "fixed[22]" + + +def test_repr_fixed() -> None: + assert repr(FixedType(22)) == "FixedType(length=22)" + + +def test_serialization_binary() -> None: + assert BinaryType().model_dump_json() == '"binary"' + + +def test_deserialization_binary() -> None: + assert BinaryType.model_validate_json('"binary"') == BinaryType() + + +def test_str_binary() -> None: + assert str(BinaryType()) == "binary" + + +def test_repr_binary() -> None: + assert repr(BinaryType()) == "BinaryType()" + + +def test_serialization_decimal() -> None: + assert DecimalType(19, 25).model_dump_json() == '"decimal(19, 25)"' + + +def test_deserialization_decimal() -> None: + decimal = DecimalType.model_validate_json('"decimal(19, 25)"') + assert decimal == DecimalType(19, 25) + assert decimal.precision == 19 + assert decimal.scale == 25 + + +def test_deserialization_decimal_failure() -> None: + with pytest.raises(ValidationError) as exc_info: + _ = DecimalType.model_validate_json('"decimal(abc, def)"') + + assert "Could not parse decimal(abc, def) into a DecimalType" in str(exc_info.value) + + +def test_str_decimal() -> None: + assert str(DecimalType(19, 25)) == "decimal(19, 25)" + + +def test_repr_decimal() -> None: + assert repr(DecimalType(19, 25)) == "DecimalType(precision=19, scale=25)" + + +def test_serialization_nestedfield() -> None: + expected = '{"id":1,"name":"required_field","type":"string","required":true,"doc":"this is a doc"}' + actual = NestedField(1, "required_field", StringType(), True, "this is a doc").model_dump_json() + assert expected == actual + + +def test_serialization_nestedfield_no_doc() -> None: + expected = '{"id":1,"name":"required_field","type":"string","required":true}' + actual = NestedField(1, "required_field", StringType(), True).model_dump_json() + assert expected == actual + + +def test_str_nestedfield() -> None: + assert str(NestedField(1, "required_field", StringType(), True)) == "1: required_field: required string" + + +def test_repr_nestedfield() -> None: + assert ( + repr(NestedField(1, "required_field", StringType(), True)) + == "NestedField(field_id=1, name='required_field', field_type=StringType(), required=True)" + ) + + +def test_nestedfield_by_alias() -> None: + # We should be able to initialize a NestedField by alias + expected = NestedField(1, "required_field", StringType(), True, "this is a doc") + actual = NestedField(**{"id": 1, "name": "required_field", "type": "string", "required": True, "doc": "this is a doc"}) # type: ignore + assert expected == actual + + +def test_deserialization_nestedfield() -> None: + expected = NestedField(1, "required_field", StringType(), True, "this is a doc") + actual = NestedField.model_validate_json( + '{"id": 1, "name": "required_field", "type": "string", "required": true, "doc": "this is a doc"}' + ) + assert expected == actual + + +def test_deserialization_nestedfield_inner() -> None: + expected = NestedField(1, "required_field", StringType(), True, "this is a doc") + actual = NestedField.model_validate_json( + '{"id":1,"name":"required_field","type":"string","required":true,"doc":"this is a doc"}' + ) + assert expected == actual + + +def test_serialization_struct() -> None: + actual = StructType( + NestedField(1, "required_field", StringType(), True, "this is a doc"), NestedField(2, "optional_field", IntegerType()) + ).model_dump_json() + 
expected = (
+        '{"type":"struct","fields":['
+        '{"id":1,"name":"required_field","type":"string","required":true,"doc":"this is a doc"},'
+        '{"id":2,"name":"optional_field","type":"int","required":true}'
+        "]}"
+    )
+    assert actual == expected
+
+
+def test_deserialization_struct() -> None:
+    actual = StructType.model_validate_json(
+        """
+        {
+            "type": "struct",
+            "fields": [{
+                    "id": 1,
+                    "name": "required_field",
+                    "type": "string",
+                    "required": true,
+                    "doc": "this is a doc"
+                }, {
+                    "id": 2,
+                    "name": "optional_field",
+                    "type": "int",
+                    "required": true
+                }
+            ]
+        }
+        """
+    )
+
+    expected = StructType(
+        NestedField(1, "required_field", StringType(), True, "this is a doc"), NestedField(2, "optional_field", IntegerType())
+    )
+
+    assert actual == expected
+
+
+def test_str_struct(simple_struct: StructType) -> None:
+    assert str(simple_struct) == "struct<1: required_field: required string (this is a doc), 2: optional_field: required int>"
+
+
+def test_repr_struct(simple_struct: StructType) -> None:
+    assert (
+        repr(simple_struct)
+        == "StructType(fields=(NestedField(field_id=1, name='required_field', field_type=StringType(), required=True), NestedField(field_id=2, name='optional_field', field_type=IntegerType(), required=True),))"
+    )
+
+
+def test_serialization_list(simple_list: ListType) -> None:
+    actual = simple_list.model_dump_json()
+    expected = '{"type":"list","element-id":22,"element":"string","element-required":true}'
+    assert actual == expected
+
+
+def test_deserialization_list(simple_list: ListType) -> None:
+    actual = ListType.model_validate_json('{"type": "list", "element-id": 22, "element": "string", "element-required": true}')
+    assert actual == simple_list
+
+
+def test_str_list(simple_list: ListType) -> None:
+    assert str(simple_list) == "list<string>"
+
+
+def test_repr_list(simple_list: ListType) -> None:
+    assert repr(simple_list) == "ListType(type='list', element_id=22, element_type=StringType(), element_required=True)"
+
+
+def test_serialization_map(simple_map: MapType) -> None:
+    actual = simple_map.model_dump_json()
+    expected = """{"type":"map","key-id":19,"key":"string","value-id":25,"value":"double","value-required":false}"""
+
+    assert actual == expected
+
+
+def test_deserialization_map(simple_map: MapType) -> None:
+    actual = MapType.model_validate_json(
+        """{"type": "map", "key-id": 19, "key": "string", "value-id": 25, "value": "double", "value-required": false}"""
+    )
+    assert actual == simple_map
+
+
+def test_str_map(simple_map: MapType) -> None:
+    assert str(simple_map) == "map<string, double>"
+
+
+def test_repr_map(simple_map: MapType) -> None:
+    assert (
+        repr(simple_map)
+        == "MapType(type='map', key_id=19, key_type=StringType(), value_id=25, value_type=DoubleType(), value_required=False)"
+    )
+
+
+def test_types_singleton() -> None:
+    """The types are immutable so we can return the same instance multiple times"""
+    assert id(BooleanType()) == id(BooleanType())
+    assert id(FixedType(22)) == id(FixedType(22))
+    assert id(FixedType(19)) != id(FixedType(25))
diff --git a/tests/test_version.py b/tests/test_version.py
new file mode 100644
index 0000000000..32d96b06a5
--- /dev/null
+++ b/tests/test_version.py
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from pyiceberg import __version__ + + +def test_version_format() -> None: + from importlib import metadata + + installed_version = metadata.version("pyiceberg") + + assert ( + __version__ == installed_version + ), f"{__version__} <> {installed_version}, the installed version does not match with the current codebase" diff --git a/tests/utils/test_bin_packing.py b/tests/utils/test_bin_packing.py new file mode 100644 index 0000000000..054ea79556 --- /dev/null +++ b/tests/utils/test_bin_packing.py @@ -0,0 +1,86 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
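+
+# The cases below exercise PackingIterator, which packs weighted items (here,
+# file split sizes) into bins of at most the target weight, considering up to
+# `lookback` open bins when placing each item; `largest_bin_first` only changes
+# the order in which completed bins are yielded, as the expected lists show.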
+
+import random
+from typing import List
+
+import pytest
+
+from pyiceberg.utils.bin_packing import PackingIterator
+
+
+@pytest.mark.parametrize(
+    "splits, lookback, split_size, open_cost",
+    [
+        ([random.randint(0, 64) for x in range(200)], 20, 128, 4),  # random splits
+        ([], 20, 128, 4),  # no splits
+        (
+            [0] * 100 + [random.randint(0, 64) for x in range(10)] + [0] * 100,
+            20,
+            128,
+            4,
+        ),  # sparse
+    ],
+)
+def test_bin_packing(splits: List[int], lookback: int, split_size: int, open_cost: int) -> None:
+    def weight_func(x: int) -> int:
+        return max(x, open_cost)
+
+    item_list_sums: List[int] = [sum(item) for item in PackingIterator(splits, split_size, lookback, weight_func)]
+    assert all(split_size >= item_sum >= 0 for item_sum in item_list_sums)
+
+
+@pytest.mark.parametrize(
+    "splits, target_weight, lookback, largest_bin_first, expected_lists",
+    [
+        (
+            [36, 36, 36, 36, 73, 110, 128],
+            128,
+            2,
+            True,
+            [[110], [128], [36, 73], [36, 36, 36]],
+        ),
+        (
+            [36, 36, 36, 36, 73, 110, 128],
+            128,
+            2,
+            False,
+            [[36, 36, 36], [36, 73], [110], [128]],
+        ),
+        (
+            [64, 64, 128, 32, 32, 32, 32],
+            128,
+            1,
+            True,
+            [[64, 64], [128], [32, 32, 32, 32]],
+        ),
+        (
+            [64, 64, 128, 32, 32, 32, 32],
+            128,
+            1,
+            False,
+            [[64, 64], [128], [32, 32, 32, 32]],
+        ),
+    ],
+)
+def test_bin_packing_lookback(
+    splits: List[int], target_weight: int, lookback: int, largest_bin_first: bool, expected_lists: List[List[int]]
+) -> None:
+    def weight_func(x: int) -> int:
+        return x
+
+    assert list(PackingIterator(splits, target_weight, lookback, weight_func, largest_bin_first)) == expected_lists
diff --git a/tests/utils/test_concurrent.py b/tests/utils/test_concurrent.py
new file mode 100644
index 0000000000..6d730cbe75
--- /dev/null
+++ b/tests/utils/test_concurrent.py
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+ +import os +from concurrent.futures import ThreadPoolExecutor +from typing import Dict, Optional +from unittest import mock + +import pytest + +from pyiceberg.utils.concurrent import ExecutorFactory + +EMPTY_ENV: Dict[str, Optional[str]] = {} +VALID_ENV = {"PYICEBERG_MAX_WORKERS": "5"} +INVALID_ENV = {"PYICEBERG_MAX_WORKERS": "invalid"} + + +def test_create_reused() -> None: + first = ExecutorFactory.get_or_create() + second = ExecutorFactory.get_or_create() + assert isinstance(first, ThreadPoolExecutor) + assert first is second + + +@mock.patch.dict(os.environ, EMPTY_ENV) +def test_max_workers_none() -> None: + assert ExecutorFactory.max_workers() is None + + +@mock.patch.dict(os.environ, VALID_ENV) +def test_max_workers() -> None: + assert ExecutorFactory.max_workers() == 5 + + +@mock.patch.dict(os.environ, INVALID_ENV) +def test_max_workers_invalid() -> None: + with pytest.raises(ValueError): + ExecutorFactory.max_workers() diff --git a/tests/utils/test_config.py b/tests/utils/test_config.py new file mode 100644 index 0000000000..0b6cff9d7d --- /dev/null +++ b/tests/utils/test_config.py @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
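+
+# Config resolves catalog settings from PYICEBERG_CATALOG__<name>__<key>
+# environment variables (double underscores separate nesting levels, and keys
+# are compared case-insensitively) as well as from a .pyiceberg.yaml file
+# located through the PYICEBERG_HOME environment variable.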
+import os +from unittest import mock + +import pytest +from strictyaml import as_document + +from pyiceberg.utils.config import Config, _lowercase_dictionary_keys + +EXAMPLE_ENV = {"PYICEBERG_CATALOG__PRODUCTION__URI": "https://service.io/api"} + + +def test_config() -> None: + """To check if all the file lookups go well without any mocking""" + assert Config() + + +@mock.patch.dict(os.environ, EXAMPLE_ENV) +def test_from_environment_variables() -> None: + assert Config().get_catalog_config("production") == {"uri": "https://service.io/api"} + + +@mock.patch.dict(os.environ, EXAMPLE_ENV) +def test_from_environment_variables_uppercase() -> None: + assert Config().get_catalog_config("PRODUCTION") == {"uri": "https://service.io/api"} + + +def test_from_configuration_files(tmp_path_factory: pytest.TempPathFactory) -> None: + config_path = str(tmp_path_factory.mktemp("config")) + with open(f"{config_path}/.pyiceberg.yaml", "w", encoding="utf-8") as file: + yaml_str = as_document({"catalog": {"production": {"uri": "https://service.io/api"}}}).as_yaml() + file.write(yaml_str) + + os.environ["PYICEBERG_HOME"] = config_path + assert Config().get_catalog_config("production") == {"uri": "https://service.io/api"} + + +def test_lowercase_dictionary_keys() -> None: + uppercase_keys = {"UPPER": {"NESTED_UPPER": {"YES"}}} + expected = {"upper": {"nested_upper": {"YES"}}} + assert _lowercase_dictionary_keys(uppercase_keys) == expected # type: ignore diff --git a/tests/utils/test_datetime.py b/tests/utils/test_datetime.py new file mode 100644 index 0000000000..ac7ba54547 --- /dev/null +++ b/tests/utils/test_datetime.py @@ -0,0 +1,73 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+from datetime import datetime, timezone, tzinfo + +import pytest +import pytz + +from pyiceberg.utils.datetime import datetime_to_millis, millis_to_datetime + +timezones = [ + pytz.timezone("Etc/GMT"), + pytz.timezone("Etc/GMT+0"), + pytz.timezone("Etc/GMT+1"), + pytz.timezone("Etc/GMT+10"), + pytz.timezone("Etc/GMT+11"), + pytz.timezone("Etc/GMT+12"), + pytz.timezone("Etc/GMT+2"), + pytz.timezone("Etc/GMT+3"), + pytz.timezone("Etc/GMT+4"), + pytz.timezone("Etc/GMT+5"), + pytz.timezone("Etc/GMT+6"), + pytz.timezone("Etc/GMT+7"), + pytz.timezone("Etc/GMT+8"), + pytz.timezone("Etc/GMT+9"), + pytz.timezone("Etc/GMT-0"), + pytz.timezone("Etc/GMT-1"), + pytz.timezone("Etc/GMT-10"), + pytz.timezone("Etc/GMT-11"), + pytz.timezone("Etc/GMT-12"), + pytz.timezone("Etc/GMT-13"), + pytz.timezone("Etc/GMT-14"), + pytz.timezone("Etc/GMT-2"), + pytz.timezone("Etc/GMT-3"), + pytz.timezone("Etc/GMT-4"), + pytz.timezone("Etc/GMT-5"), + pytz.timezone("Etc/GMT-6"), + pytz.timezone("Etc/GMT-7"), + pytz.timezone("Etc/GMT-8"), + pytz.timezone("Etc/GMT-9"), +] + + +def test_datetime_to_millis() -> None: + dt = datetime(2023, 7, 10, 10, 10, 10, 123456) + expected = int(dt.replace(tzinfo=timezone.utc).timestamp() * 1_000) + datetime_millis = datetime_to_millis(dt) + assert datetime_millis == expected + + +@pytest.mark.parametrize("tz", timezones) +def test_datetime_tz_to_millis(tz: tzinfo) -> None: + dt = datetime(2023, 7, 10, 10, 10, 10, 123456, tzinfo=tz) + expected = int(dt.timestamp() * 1_000) + datetime_millis = datetime_to_millis(dt) + assert datetime_millis == expected + + +def test_millis_to_datetime() -> None: + assert millis_to_datetime(1690971805918) == datetime(2023, 8, 2, 10, 23, 25, 918000) diff --git a/tests/utils/test_decimal.py b/tests/utils/test_decimal.py new file mode 100644 index 0000000000..683eab93fd --- /dev/null +++ b/tests/utils/test_decimal.py @@ -0,0 +1,40 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+import pytest + +from pyiceberg.utils.decimal import decimal_required_bytes + + +def test_decimal_required_bytes() -> None: + assert decimal_required_bytes(precision=1) == 1 + assert decimal_required_bytes(precision=2) == 1 + assert decimal_required_bytes(precision=3) == 2 + assert decimal_required_bytes(precision=4) == 2 + assert decimal_required_bytes(precision=5) == 3 + assert decimal_required_bytes(precision=7) == 4 + assert decimal_required_bytes(precision=8) == 4 + assert decimal_required_bytes(precision=10) == 5 + assert decimal_required_bytes(precision=32) == 14 + assert decimal_required_bytes(precision=38) == 16 + + with pytest.raises(ValueError) as exc_info: + decimal_required_bytes(precision=40) + assert "(0, 40]" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + decimal_required_bytes(precision=-1) + assert "(0, 40]" in str(exc_info.value) diff --git a/tests/utils/test_deprecated.py b/tests/utils/test_deprecated.py new file mode 100644 index 0000000000..7c44c45859 --- /dev/null +++ b/tests/utils/test_deprecated.py @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from unittest.mock import Mock, patch + +from pyiceberg.utils.deprecated import deprecated + + +@patch("warnings.warn") +def test_deprecated(warn: Mock) -> None: + @deprecated( + deprecated_in="0.1.0", + removed_in="0.2.0", + help_message="Please use load_something_else() instead", + ) + def deprecated_method() -> None: + pass + + deprecated_method() + + assert warn.called + assert warn.call_args[0] == ( + "Call to deprecated_method, deprecated in 0.1.0, will be removed in 0.2.0. Please use load_something_else() instead.", + ) diff --git a/tests/utils/test_lazydict.py b/tests/utils/test_lazydict.py new file mode 100644 index 0000000000..a95b015a99 --- /dev/null +++ b/tests/utils/test_lazydict.py @@ -0,0 +1,31 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
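+
+# LazyDict takes chunks of alternating keys and values, so
+# [[1, "red", 5, "banana"], [3, "blue"]] behaves like
+# {1: "red", 5: "banana", 3: "blue"}, with the contents materialized lazily.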
+ +from pyiceberg.utils.lazydict import LazyDict + + +def test_lazy_dict_ints() -> None: + lazy_dict = LazyDict[int, int]([[1, 2], [3, 4]]) + assert lazy_dict[1] == 2 + assert lazy_dict[3] == 4 + + +def test_lazy_dict_strings() -> None: + lazy_dict = LazyDict[int, str]([[1, "red", 5, "banana"], [3, "blue"]]) + assert lazy_dict[1] == "red" + assert lazy_dict[3] == "blue" + assert lazy_dict[5] == "banana" diff --git a/tests/utils/test_manifest.py b/tests/utils/test_manifest.py new file mode 100644 index 0000000000..76a4a8a2b4 --- /dev/null +++ b/tests/utils/test_manifest.py @@ -0,0 +1,280 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from pyiceberg.io import load_file_io +from pyiceberg.io.pyarrow import PyArrowFileIO +from pyiceberg.manifest import ( + DataFile, + DataFileContent, + FileFormat, + ManifestContent, + ManifestEntryStatus, + ManifestFile, + PartitionFieldSummary, + read_manifest_list, +) +from pyiceberg.table import Snapshot +from pyiceberg.table.snapshots import Operation, Summary + + +def test_read_manifest_entry(generated_manifest_entry_file: str) -> None: + manifest = ManifestFile( + manifest_path=generated_manifest_entry_file, manifest_length=0, partition_spec_id=0, sequence_number=None, partitions=[] + ) + manifest_entries = manifest.fetch_manifest_entry(PyArrowFileIO()) + manifest_entry = manifest_entries[0] + + assert manifest_entry.status == ManifestEntryStatus.ADDED + assert manifest_entry.snapshot_id == 8744736658442914487 + assert manifest_entry.data_sequence_number is None + assert isinstance(manifest_entry.data_file, DataFile) + + data_file = manifest_entry.data_file + + assert data_file.content is DataFileContent.DATA + assert ( + data_file.file_path + == "/home/iceberg/warehouse/nyc/taxis_partitioned/data/VendorID=null/00000-633-d8a4223e-dc97-45a1-86e1-adaba6e8abd7-00001.parquet" + ) + assert data_file.file_format == FileFormat.PARQUET + assert repr(data_file.partition) == "Record[VendorID=1, tpep_pickup_datetime=1925]" + assert data_file.record_count == 19513 + assert data_file.file_size_in_bytes == 388872 + assert data_file.column_sizes == { + 1: 53, + 2: 98153, + 3: 98693, + 4: 53, + 5: 53, + 6: 53, + 7: 17425, + 8: 18528, + 9: 53, + 10: 44788, + 11: 35571, + 12: 53, + 13: 1243, + 14: 2355, + 15: 12750, + 16: 4029, + 17: 110, + 18: 47194, + 19: 2948, + } + assert data_file.value_counts == { + 1: 19513, + 2: 19513, + 3: 19513, + 4: 19513, + 5: 19513, + 6: 19513, + 7: 19513, + 8: 19513, + 9: 19513, + 10: 19513, + 11: 19513, + 12: 19513, + 13: 19513, + 14: 19513, + 15: 19513, + 16: 19513, + 17: 19513, + 18: 19513, + 19: 19513, + } + assert data_file.null_value_counts == { + 1: 19513, + 2: 0, + 3: 0, + 4: 19513, + 5: 19513, + 6: 19513, + 7: 0, + 8: 0, + 9: 19513, + 10: 0, + 11: 0, + 12: 19513, + 13: 0, + 14: 0, + 15: 0, + 
16: 0, + 17: 0, + 18: 0, + 19: 0, + } + assert data_file.nan_value_counts == {16: 0, 17: 0, 18: 0, 19: 0, 10: 0, 11: 0, 12: 0, 13: 0, 14: 0, 15: 0} + assert data_file.lower_bounds == { + 2: b"2020-04-01 00:00", + 3: b"2020-04-01 00:12", + 7: b"\x03\x00\x00\x00", + 8: b"\x01\x00\x00\x00", + 10: b"\xf6(\\\x8f\xc2\x05S\xc0", + 11: b"\x00\x00\x00\x00\x00\x00\x00\x00", + 13: b"\x00\x00\x00\x00\x00\x00\x00\x00", + 14: b"\x00\x00\x00\x00\x00\x00\xe0\xbf", + 15: b")\\\x8f\xc2\xf5(\x08\xc0", + 16: b"\x00\x00\x00\x00\x00\x00\x00\x00", + 17: b"\x00\x00\x00\x00\x00\x00\x00\x00", + 18: b"\xf6(\\\x8f\xc2\xc5S\xc0", + 19: b"\x00\x00\x00\x00\x00\x00\x04\xc0", + } + assert data_file.upper_bounds == { + 2: b"2020-04-30 23:5:", + 3: b"2020-05-01 00:41", + 7: b"\t\x01\x00\x00", + 8: b"\t\x01\x00\x00", + 10: b"\xcd\xcc\xcc\xcc\xcc,_@", + 11: b"\x1f\x85\xebQ\\\xe2\xfe@", + 13: b"\x00\x00\x00\x00\x00\x00\x12@", + 14: b"\x00\x00\x00\x00\x00\x00\xe0?", + 15: b"q=\n\xd7\xa3\xf01@", + 16: b"\x00\x00\x00\x00\x00`B@", + 17: b"333333\xd3?", + 18: b"\x00\x00\x00\x00\x00\x18b@", + 19: b"\x00\x00\x00\x00\x00\x00\x04@", + } + assert data_file.key_metadata is None + assert data_file.split_offsets == [4] + assert data_file.equality_ids is None + assert data_file.sort_order_id == 0 + + +def test_read_manifest_list(generated_manifest_file_file_v1: str) -> None: + input_file = PyArrowFileIO().new_input(generated_manifest_file_file_v1) + manifest_list = list(read_manifest_list(input_file))[0] + + assert manifest_list.manifest_length == 7989 + assert manifest_list.partition_spec_id == 0 + assert manifest_list.added_snapshot_id == 9182715666859759686 + assert manifest_list.added_files_count == 3 + assert manifest_list.existing_files_count == 0 + assert manifest_list.deleted_files_count == 0 + + assert isinstance(manifest_list.partitions, list) + + partitions_summary = manifest_list.partitions[0] + assert isinstance(partitions_summary, PartitionFieldSummary) + + assert partitions_summary.contains_null is True + assert partitions_summary.contains_nan is False + assert partitions_summary.lower_bound == b"\x01\x00\x00\x00" + assert partitions_summary.upper_bound == b"\x02\x00\x00\x00" + + assert manifest_list.added_rows_count == 237993 + assert manifest_list.existing_rows_count == 0 + assert manifest_list.deleted_rows_count == 0 + + +def test_read_manifest_v1(generated_manifest_file_file_v1: str) -> None: + io = load_file_io() + + snapshot = Snapshot( + snapshot_id=25, + parent_snapshot_id=19, + timestamp_ms=1602638573590, + manifest_list=generated_manifest_file_file_v1, + summary=Summary(Operation.APPEND), + schema_id=3, + ) + manifest_list = snapshot.manifests(io)[0] + + assert manifest_list.manifest_length == 7989 + assert manifest_list.partition_spec_id == 0 + assert manifest_list.content == ManifestContent.DATA + assert manifest_list.sequence_number == 0 + assert manifest_list.min_sequence_number == 0 + assert manifest_list.added_snapshot_id == 9182715666859759686 + assert manifest_list.added_files_count == 3 + assert manifest_list.existing_files_count == 0 + assert manifest_list.deleted_files_count == 0 + assert manifest_list.added_rows_count == 237993 + assert manifest_list.existing_rows_count == 0 + assert manifest_list.deleted_rows_count == 0 + assert manifest_list.key_metadata is None + + assert isinstance(manifest_list.partitions, list) + + partition = manifest_list.partitions[0] + + assert isinstance(partition, PartitionFieldSummary) + + assert partition.contains_null is True + assert partition.contains_nan is False + 
assert partition.lower_bound == b"\x01\x00\x00\x00" + assert partition.upper_bound == b"\x02\x00\x00\x00" + + entries = manifest_list.fetch_manifest_entry(io) + + assert isinstance(entries, list) + + entry = entries[0] + + assert entry.data_sequence_number == 0 + assert entry.file_sequence_number == 0 + assert entry.snapshot_id == 8744736658442914487 + assert entry.status == ManifestEntryStatus.ADDED + + +def test_read_manifest_v2(generated_manifest_file_file_v2: str) -> None: + io = load_file_io() + + snapshot = Snapshot( + snapshot_id=25, + parent_snapshot_id=19, + timestamp_ms=1602638573590, + manifest_list=generated_manifest_file_file_v2, + summary=Summary(Operation.APPEND), + schema_id=3, + ) + manifest_list = snapshot.manifests(io)[0] + + assert manifest_list.manifest_length == 7989 + assert manifest_list.partition_spec_id == 0 + assert manifest_list.content == ManifestContent.DELETES + assert manifest_list.sequence_number == 3 + assert manifest_list.min_sequence_number == 3 + assert manifest_list.added_snapshot_id == 9182715666859759686 + assert manifest_list.added_files_count == 3 + assert manifest_list.existing_files_count == 0 + assert manifest_list.deleted_files_count == 0 + assert manifest_list.added_rows_count == 237993 + assert manifest_list.existing_rows_count == 0 + assert manifest_list.deleted_rows_count == 0 + assert manifest_list.key_metadata is None + + assert isinstance(manifest_list.partitions, list) + + partition = manifest_list.partitions[0] + + assert isinstance(partition, PartitionFieldSummary) + + assert partition.contains_null is True + assert partition.contains_nan is False + assert partition.lower_bound == b"\x01\x00\x00\x00" + assert partition.upper_bound == b"\x02\x00\x00\x00" + + entries = manifest_list.fetch_manifest_entry(io) + + assert isinstance(entries, list) + + entry = entries[0] + + assert entry.data_sequence_number == 3 + assert entry.file_sequence_number == 3 + assert entry.snapshot_id == 8744736658442914487 + assert entry.status == ManifestEntryStatus.ADDED diff --git a/tests/utils/test_schema_conversion.py b/tests/utils/test_schema_conversion.py new file mode 100644 index 0000000000..2c42c445e4 --- /dev/null +++ b/tests/utils/test_schema_conversion.py @@ -0,0 +1,370 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
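+
+# AvroSchemaConversion converts between Avro schemas carrying Iceberg field
+# IDs (field-id, element-id, key-id/value-id) and Iceberg types. Unions are
+# only accepted in the optional ["null", T] form, and logical types such as
+# decimal and date map onto their Iceberg counterparts.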
+# pylint: disable=W0212 +from typing import Any, Dict + +import pytest + +from pyiceberg.schema import Schema +from pyiceberg.types import ( + BinaryType, + BooleanType, + DateType, + DecimalType, + FixedType, + IntegerType, + ListType, + LongType, + MapType, + NestedField, + StringType, + StructType, +) +from pyiceberg.utils.schema_conversion import AvroSchemaConversion + + +def test_avro_to_iceberg(avro_schema_manifest_file_v1: Dict[str, Any]) -> None: + iceberg_schema = AvroSchemaConversion().avro_to_iceberg(avro_schema_manifest_file_v1) + expected_iceberg_schema = Schema( + NestedField( + field_id=500, name="manifest_path", field_type=StringType(), required=True, doc="Location URI with FS scheme" + ), + NestedField(field_id=501, name="manifest_length", field_type=LongType(), required=True, doc="Total file size in bytes"), + NestedField(field_id=502, name="partition_spec_id", field_type=IntegerType(), required=True, doc="Spec ID used to write"), + NestedField( + field_id=503, + name="added_snapshot_id", + field_type=LongType(), + required=False, + doc="Snapshot ID that added the manifest", + ), + NestedField( + field_id=504, name="added_data_files_count", field_type=IntegerType(), required=False, doc="Added entry count" + ), + NestedField( + field_id=505, name="existing_data_files_count", field_type=IntegerType(), required=False, doc="Existing entry count" + ), + NestedField( + field_id=506, name="deleted_data_files_count", field_type=IntegerType(), required=False, doc="Deleted entry count" + ), + NestedField( + field_id=507, + name="partitions", + field_type=ListType( + element_id=508, + element_type=StructType( + NestedField( + field_id=509, + name="contains_null", + field_type=BooleanType(), + required=True, + doc="True if any file has a null partition value", + ), + NestedField( + field_id=518, + name="contains_nan", + field_type=BooleanType(), + required=False, + doc="True if any file has a nan partition value", + ), + NestedField( + field_id=510, + name="lower_bound", + field_type=BinaryType(), + required=False, + doc="Partition lower bound for all files", + ), + NestedField( + field_id=511, + name="upper_bound", + field_type=BinaryType(), + required=False, + doc="Partition upper bound for all files", + ), + ), + element_required=True, + ), + required=False, + doc="Summary for each partition", + ), + NestedField(field_id=512, name="added_rows_count", field_type=LongType(), required=False, doc="Added rows count"), + NestedField(field_id=513, name="existing_rows_count", field_type=LongType(), required=False, doc="Existing rows count"), + NestedField(field_id=514, name="deleted_rows_count", field_type=LongType(), required=False, doc="Deleted rows count"), + schema_id=1, + identifier_field_ids=[], + ) + assert iceberg_schema == expected_iceberg_schema + + +def test_avro_list_required_primitive() -> None: + avro_schema = { + "type": "record", + "name": "avro_schema", + "fields": [ + { + "name": "array_with_string", + "type": { + "type": "array", + "items": "string", + "default": [], + "element-id": 101, + }, + "field-id": 100, + }, + ], + } + + expected_iceberg_schema = Schema( + NestedField( + field_id=100, + name="array_with_string", + field_type=ListType(element_id=101, element_type=StringType(), element_required=True), + required=True, + ), + schema_id=1, + ) + + iceberg_schema = AvroSchemaConversion().avro_to_iceberg(avro_schema) + + assert expected_iceberg_schema == iceberg_schema + + +def test_avro_list_wrapped_primitive() -> None: + avro_schema = { + "type": "record", + 
"name": "avro_schema", + "fields": [ + { + "name": "array_with_string", + "type": { + "type": "array", + "items": {"type": "string"}, + "default": [], + "element-id": 101, + }, + "field-id": 100, + }, + ], + } + + expected_iceberg_schema = Schema( + NestedField( + field_id=100, + name="array_with_string", + field_type=ListType(element_id=101, element_type=StringType(), element_required=True), + required=True, + ), + schema_id=1, + ) + + iceberg_schema = AvroSchemaConversion().avro_to_iceberg(avro_schema) + + assert expected_iceberg_schema == iceberg_schema + + +def test_avro_list_required_record() -> None: + avro_schema = { + "type": "record", + "name": "avro_schema", + "fields": [ + { + "name": "array_with_record", + "type": { + "type": "array", + "items": { + "type": "record", + "name": "r101", + "fields": [ + { + "name": "contains_null", + "type": "boolean", + "field-id": 102, + }, + { + "name": "contains_nan", + "type": ["null", "boolean"], + "field-id": 103, + }, + ], + }, + "element-id": 101, + }, + "field-id": 100, + } + ], + } + + expected_iceberg_schema = Schema( + NestedField( + field_id=100, + name="array_with_record", + field_type=ListType( + element_id=101, + element_type=StructType( + NestedField(field_id=102, name="contains_null", field_type=BooleanType(), required=True), + NestedField(field_id=103, name="contains_nan", field_type=BooleanType(), required=False), + ), + element_required=True, + ), + required=True, + ), + schema_id=1, + identifier_field_ids=[], + ) + + iceberg_schema = AvroSchemaConversion().avro_to_iceberg(avro_schema) + + assert expected_iceberg_schema == iceberg_schema + + +def test_resolve_union() -> None: + with pytest.raises(TypeError) as exc_info: + AvroSchemaConversion()._resolve_union(["null", "string", "long"]) + + assert "Non-optional types aren't part of the Iceberg specification" in str(exc_info.value) + + +def test_nested_type() -> None: + # In the case a primitive field is nested + assert AvroSchemaConversion()._convert_schema({"type": {"type": "string"}}) == StringType() + + +def test_map_type() -> None: + avro_type = { + "type": "map", + "values": ["null", "long"], + "key-id": 101, + "value-id": 102, + } + actual = AvroSchemaConversion()._convert_schema(avro_type) + expected = MapType(key_id=101, key_type=StringType(), value_id=102, value_type=LongType(), value_required=False) + assert actual == expected + + +def test_fixed_type() -> None: + avro_type = {"type": "fixed", "size": 22} + actual = AvroSchemaConversion()._convert_schema(avro_type) + expected = FixedType(22) + assert actual == expected + + +def test_unknown_primitive() -> None: + with pytest.raises(TypeError) as exc_info: + avro_type = "UnknownType" + AvroSchemaConversion()._convert_schema(avro_type) + assert "Unknown type: UnknownType" in str(exc_info.value) + + +def test_unknown_complex_type() -> None: + with pytest.raises(TypeError) as exc_info: + avro_type = { + "type": "UnknownType", + } + AvroSchemaConversion()._convert_schema(avro_type) + assert "Unknown type: {'type': 'UnknownType'}" in str(exc_info.value) + + +def test_convert_field_without_field_id() -> None: + with pytest.raises(ValueError) as exc_info: + avro_field = { + "name": "contains_null", + "type": "boolean", + } + AvroSchemaConversion()._convert_field(avro_field) + assert "Cannot convert field, missing field-id" in str(exc_info.value) + + +def test_convert_record_type_without_record() -> None: + with pytest.raises(ValueError) as exc_info: + avro_field = {"type": "non-record", "name": "avro_schema", "fields": []} 
+ AvroSchemaConversion()._convert_record_type(avro_field) + assert "Expected record type, got" in str(exc_info.value) + + +def test_avro_list_missing_element_id() -> None: + avro_type = { + "name": "array_with_string", + "type": { + "type": "array", + "items": "string", + "default": [], + # "element-id": 101, + }, + "field-id": 100, + } + + with pytest.raises(ValueError) as exc_info: + AvroSchemaConversion()._convert_array_type(avro_type) + + assert "Cannot convert array-type, missing element-id:" in str(exc_info.value) + + +def test_convert_decimal_type() -> None: + avro_decimal_type = {"type": "bytes", "logicalType": "decimal", "precision": 19, "scale": 25} + actual = AvroSchemaConversion()._convert_logical_type(avro_decimal_type) + expected = DecimalType(precision=19, scale=25) + assert actual == expected + + +def test_convert_date_type() -> None: + avro_logical_type = {"type": "int", "logicalType": "date"} + actual = AvroSchemaConversion()._convert_logical_type(avro_logical_type) + assert actual == DateType() + + +def test_unknown_logical_type() -> None: + """Test raising a ValueError when converting an unknown logical type as part of an Avro schema conversion""" + avro_logical_type = {"type": "bytes", "logicalType": "date"} + with pytest.raises(ValueError) as exc_info: + AvroSchemaConversion()._convert_logical_type(avro_logical_type) + + assert "Unknown logical/physical type combination:" in str(exc_info.value) + + +def test_logical_map_with_invalid_fields() -> None: + avro_type = { + "type": "array", + "logicalType": "map", + "items": { + "type": "record", + "name": "k101_v102", + "fields": [ + {"name": "key", "type": "int", "field-id": 101}, + {"name": "value", "type": "string", "field-id": 102}, + {"name": "other", "type": "bytes", "field-id": 103}, + ], + }, + } + + with pytest.raises(ValueError) as exc_info: + AvroSchemaConversion()._convert_logical_map_type(avro_type) + + assert "Invalid key-value pair schema:" in str(exc_info.value) + + +def test_iceberg_to_avro_manifest_list(avro_schema_manifest_file_v1: Dict[str, Any]) -> None: + """Round trip the manifest list""" + iceberg_schema = AvroSchemaConversion().avro_to_iceberg(avro_schema_manifest_file_v1) + avro_result = AvroSchemaConversion().iceberg_to_avro(iceberg_schema, schema_name="manifest_file") + assert avro_schema_manifest_file_v1 == avro_result + + +def test_iceberg_to_avro_manifest(avro_schema_manifest_entry: Dict[str, Any]) -> None: + """Round trip the manifest itself""" + iceberg_schema = AvroSchemaConversion().avro_to_iceberg(avro_schema_manifest_entry) + avro_result = AvroSchemaConversion().iceberg_to_avro(iceberg_schema, schema_name="manifest_entry") + assert avro_schema_manifest_entry == avro_result diff --git a/tests/utils/test_singleton.py b/tests/utils/test_singleton.py new file mode 100644 index 0000000000..742012886c --- /dev/null +++ b/tests/utils/test_singleton.py @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from pyiceberg.avro.reader import BooleanReader, FixedReader
+from pyiceberg.transforms import VoidTransform
+
+
+def test_singleton() -> None:
+    """We want to reuse the readers to avoid creating a gazillion of them"""
+    assert id(BooleanReader()) == id(BooleanReader())
+    assert id(FixedReader(22)) == id(FixedReader(22))
+    assert id(FixedReader(19)) != id(FixedReader(25))
+
+
+def test_singleton_transform() -> None:
+    """We want to reuse VoidTransform since it doesn't carry any state"""
+    assert id(VoidTransform()) == id(VoidTransform())
diff --git a/tests/utils/test_truncate.py b/tests/utils/test_truncate.py
new file mode 100644
index 0000000000..b9c3c10335
--- /dev/null
+++ b/tests/utils/test_truncate.py
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from pyiceberg.utils.truncate import truncate_upper_bound_binary_string, truncate_upper_bound_text_string
+
+
+def test_upper_bound_string_truncation() -> None:
+    assert truncate_upper_bound_text_string("aaaa", 2) == "ab"
+    assert truncate_upper_bound_text_string("".join([chr(0x10FFFF), chr(0x10FFFF), chr(0x0)]), 2) is None
+
+
+def test_upper_bound_binary_truncation() -> None:
+    assert truncate_upper_bound_binary_string(b"\x01\x02\x03", 2) == b"\x01\x03"
+    assert truncate_upper_bound_binary_string(b"\xff\xff\x00", 2) is None
diff --git a/vendor/README.md b/vendor/README.md
new file mode 100644
index 0000000000..0b55d9e5c6
--- /dev/null
+++ b/vendor/README.md
@@ -0,0 +1,45 @@
+
+# Vendor packages
+
+Some packages we maintain in the repository itself because there is no good third-party alternative.
+
+## FB303 Thrift client
+
+fb303 is a base Thrift service and a common set of functionality for querying stats, options, and other information from a service.
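+
+To regenerate the vendored client from the upstream Thrift definition: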
+
+```bash
+rm -f /tmp/fb303.thrift
+rm -rf fb303
+curl -s https://raw.githubusercontent.com/apache/thrift/master/contrib/fb303/if/fb303.thrift > /tmp/fb303.thrift
+rm -rf /tmp/gen-py/
+thrift -gen py -o /tmp/ /tmp/fb303.thrift
+mv /tmp/gen-py/fb303 fb303
+```
+
+## Hive Metastore Thrift definition
+
+The Thrift definition requires the fb303 service as a dependency:
+
+```bash
+rm -rf /tmp/hive
+mkdir -p /tmp/hive/share/fb303/if/
+curl -s https://raw.githubusercontent.com/apache/thrift/master/contrib/fb303/if/fb303.thrift > /tmp/hive/share/fb303/if/fb303.thrift
+curl -s https://raw.githubusercontent.com/apache/hive/master/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift > /tmp/hive/hive_metastore.thrift
+thrift -gen py -o /tmp/hive /tmp/hive/hive_metastore.thrift
+mv /tmp/hive/gen-py/hive_metastore hive_metastore
+```
diff --git a/vendor/fb303/FacebookService.py b/vendor/fb303/FacebookService.py
new file mode 100644
index 0000000000..c46b0a82a2
--- /dev/null
+++ b/vendor/fb303/FacebookService.py
@@ -0,0 +1,2420 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Autogenerated by Thrift Compiler (0.16.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+# options string: py
+#
+
+import logging
+import sys
+
+from thrift.Thrift import (
+    TApplicationException,
+    TMessageType,
+    TProcessor,
+    TType,
+)
+from thrift.transport import TTransport
+from thrift.TRecursive import fix_spec
+
+from .ttypes import *
+
+all_structs = []
+
+
+class Iface:
+    """
+    Standard base service
+
+    """
+
+    def getName(self):
+        """
+        Returns a descriptive name of the service
+
+        """
+        pass
+
+    def getVersion(self):
+        """
+        Returns the version of the service
+
+        """
+        pass
+
+    def getStatus(self):
+        """
+        Gets the status of this service
+
+        """
+        pass
+
+    def getStatusDetails(self):
+        """
+        User friendly description of status, such as why the service is in
+        the dead or warning state, or what is being started or stopped.
+
+        """
+        pass
+
+    def getCounters(self):
+        """
+        Gets the counters for this service
+
+        """
+        pass
+
+    def getCounter(self, key):
+        """
+        Gets the value of a single counter
+
+        Parameters:
+         - key
+
+        """
+        pass
+
+    def setOption(self, key, value):
+        """
+        Sets an option
+
+        Parameters:
+         - key
+         - value
+
+        """
+        pass
+
+    def getOption(self, key):
+        """
+        Gets an option
+
+        Parameters:
+         - key
+
+        """
+        pass
+
+    def getOptions(self):
+        """
+        Gets all options
+
+        """
+        pass
+
+    def getCpuProfile(self, profileDurationInSec):
+        """
+        Returns a CPU profile over the given time interval (client and server
+        must agree on the profile format).
+ + Parameters: + - profileDurationInSec + + """ + pass + + def aliveSince(self): + """ + Returns the unix time that the server has been running since + + """ + pass + + def reinitialize(self): + """ + Tell the server to reload its configuration, reopen log files, etc + + """ + pass + + def shutdown(self): + """ + Suggest a shutdown to the server + + """ + pass + + +class Client(Iface): + """ + Standard base service + + """ + + def __init__(self, iprot, oprot=None): + self._iprot = self._oprot = iprot + if oprot is not None: + self._oprot = oprot + self._seqid = 0 + + def getName(self): + """ + Returns a descriptive name of the service + + """ + self.send_getName() + return self.recv_getName() + + def send_getName(self): + self._oprot.writeMessageBegin("getName", TMessageType.CALL, self._seqid) + args = getName_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getName(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getName_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "getName failed: unknown result") + + def getVersion(self): + """ + Returns the version of the service + + """ + self.send_getVersion() + return self.recv_getVersion() + + def send_getVersion(self): + self._oprot.writeMessageBegin("getVersion", TMessageType.CALL, self._seqid) + args = getVersion_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getVersion(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getVersion_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "getVersion failed: unknown result") + + def getStatus(self): + """ + Gets the status of this service + + """ + self.send_getStatus() + return self.recv_getStatus() + + def send_getStatus(self): + self._oprot.writeMessageBegin("getStatus", TMessageType.CALL, self._seqid) + args = getStatus_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getStatus(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getStatus_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "getStatus failed: unknown result") + + def getStatusDetails(self): + """ + User friendly description of status, such as why the service is in + the dead or warning state, or what is being started or stopped. 
+ + """ + self.send_getStatusDetails() + return self.recv_getStatusDetails() + + def send_getStatusDetails(self): + self._oprot.writeMessageBegin("getStatusDetails", TMessageType.CALL, self._seqid) + args = getStatusDetails_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getStatusDetails(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getStatusDetails_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "getStatusDetails failed: unknown result") + + def getCounters(self): + """ + Gets the counters for this service + + """ + self.send_getCounters() + return self.recv_getCounters() + + def send_getCounters(self): + self._oprot.writeMessageBegin("getCounters", TMessageType.CALL, self._seqid) + args = getCounters_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getCounters(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getCounters_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "getCounters failed: unknown result") + + def getCounter(self, key): + """ + Gets the value of a single counter + + Parameters: + - key + + """ + self.send_getCounter(key) + return self.recv_getCounter() + + def send_getCounter(self, key): + self._oprot.writeMessageBegin("getCounter", TMessageType.CALL, self._seqid) + args = getCounter_args() + args.key = key + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getCounter(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getCounter_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "getCounter failed: unknown result") + + def setOption(self, key, value): + """ + Sets an option + + Parameters: + - key + - value + + """ + self.send_setOption(key, value) + self.recv_setOption() + + def send_setOption(self, key, value): + self._oprot.writeMessageBegin("setOption", TMessageType.CALL, self._seqid) + args = setOption_args() + args.key = key + args.value = value + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_setOption(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = setOption_result() + result.read(iprot) + iprot.readMessageEnd() + return + + def getOption(self, key): + """ + Gets an option + + Parameters: + - key + + """ + self.send_getOption(key) + return self.recv_getOption() + + def send_getOption(self, key): + self._oprot.writeMessageBegin("getOption", TMessageType.CALL, self._seqid) + args = getOption_args() + args.key = key + 
args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getOption(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getOption_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "getOption failed: unknown result") + + def getOptions(self): + """ + Gets all options + + """ + self.send_getOptions() + return self.recv_getOptions() + + def send_getOptions(self): + self._oprot.writeMessageBegin("getOptions", TMessageType.CALL, self._seqid) + args = getOptions_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getOptions(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getOptions_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "getOptions failed: unknown result") + + def getCpuProfile(self, profileDurationInSec): + """ + Returns a CPU profile over the given time interval (client and server + must agree on the profile format). + + Parameters: + - profileDurationInSec + + """ + self.send_getCpuProfile(profileDurationInSec) + return self.recv_getCpuProfile() + + def send_getCpuProfile(self, profileDurationInSec): + self._oprot.writeMessageBegin("getCpuProfile", TMessageType.CALL, self._seqid) + args = getCpuProfile_args() + args.profileDurationInSec = profileDurationInSec + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getCpuProfile(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getCpuProfile_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "getCpuProfile failed: unknown result") + + def aliveSince(self): + """ + Returns the unix time that the server has been running since + + """ + self.send_aliveSince() + return self.recv_aliveSince() + + def send_aliveSince(self): + self._oprot.writeMessageBegin("aliveSince", TMessageType.CALL, self._seqid) + args = aliveSince_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_aliveSince(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = aliveSince_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "aliveSince failed: unknown result") + + def reinitialize(self): + """ + Tell the server to reload its configuration, reopen log files, etc + + """ + self.send_reinitialize() + + def send_reinitialize(self): + self._oprot.writeMessageBegin("reinitialize", TMessageType.ONEWAY, self._seqid) + args = reinitialize_args() + 
args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def shutdown(self): + """ + Suggest a shutdown to the server + + """ + self.send_shutdown() + + def send_shutdown(self): + self._oprot.writeMessageBegin("shutdown", TMessageType.ONEWAY, self._seqid) + args = shutdown_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + +class Processor(Iface, TProcessor): + def __init__(self, handler): + self._handler = handler + self._processMap = {} + self._processMap["getName"] = Processor.process_getName + self._processMap["getVersion"] = Processor.process_getVersion + self._processMap["getStatus"] = Processor.process_getStatus + self._processMap["getStatusDetails"] = Processor.process_getStatusDetails + self._processMap["getCounters"] = Processor.process_getCounters + self._processMap["getCounter"] = Processor.process_getCounter + self._processMap["setOption"] = Processor.process_setOption + self._processMap["getOption"] = Processor.process_getOption + self._processMap["getOptions"] = Processor.process_getOptions + self._processMap["getCpuProfile"] = Processor.process_getCpuProfile + self._processMap["aliveSince"] = Processor.process_aliveSince + self._processMap["reinitialize"] = Processor.process_reinitialize + self._processMap["shutdown"] = Processor.process_shutdown + self._on_message_begin = None + + def on_message_begin(self, func): + self._on_message_begin = func + + def process(self, iprot, oprot): + (name, type, seqid) = iprot.readMessageBegin() + if self._on_message_begin: + self._on_message_begin(name, type, seqid) + if name not in self._processMap: + iprot.skip(TType.STRUCT) + iprot.readMessageEnd() + x = TApplicationException(TApplicationException.UNKNOWN_METHOD, "Unknown function %s" % (name)) + oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid) + x.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + return + else: + self._processMap[name](self, seqid, iprot, oprot) + return True + + def process_getName(self, seqid, iprot, oprot): + args = getName_args() + args.read(iprot) + iprot.readMessageEnd() + result = getName_result() + try: + result.success = self._handler.getName() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("getName", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_getVersion(self, seqid, iprot, oprot): + args = getVersion_args() + args.read(iprot) + iprot.readMessageEnd() + result = getVersion_result() + try: + result.success = self._handler.getVersion() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("getVersion", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def 
process_getStatus(self, seqid, iprot, oprot): + args = getStatus_args() + args.read(iprot) + iprot.readMessageEnd() + result = getStatus_result() + try: + result.success = self._handler.getStatus() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("getStatus", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_getStatusDetails(self, seqid, iprot, oprot): + args = getStatusDetails_args() + args.read(iprot) + iprot.readMessageEnd() + result = getStatusDetails_result() + try: + result.success = self._handler.getStatusDetails() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("getStatusDetails", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_getCounters(self, seqid, iprot, oprot): + args = getCounters_args() + args.read(iprot) + iprot.readMessageEnd() + result = getCounters_result() + try: + result.success = self._handler.getCounters() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("getCounters", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_getCounter(self, seqid, iprot, oprot): + args = getCounter_args() + args.read(iprot) + iprot.readMessageEnd() + result = getCounter_result() + try: + result.success = self._handler.getCounter(args.key) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("getCounter", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_setOption(self, seqid, iprot, oprot): + args = setOption_args() + args.read(iprot) + iprot.readMessageEnd() + result = setOption_result() + try: + self._handler.setOption(args.key, args.value) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + 
logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("setOption", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_getOption(self, seqid, iprot, oprot): + args = getOption_args() + args.read(iprot) + iprot.readMessageEnd() + result = getOption_result() + try: + result.success = self._handler.getOption(args.key) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("getOption", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_getOptions(self, seqid, iprot, oprot): + args = getOptions_args() + args.read(iprot) + iprot.readMessageEnd() + result = getOptions_result() + try: + result.success = self._handler.getOptions() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("getOptions", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_getCpuProfile(self, seqid, iprot, oprot): + args = getCpuProfile_args() + args.read(iprot) + iprot.readMessageEnd() + result = getCpuProfile_result() + try: + result.success = self._handler.getCpuProfile(args.profileDurationInSec) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("getCpuProfile", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_aliveSince(self, seqid, iprot, oprot): + args = aliveSince_args() + args.read(iprot) + iprot.readMessageEnd() + result = aliveSince_result() + try: + result.success = self._handler.aliveSince() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("aliveSince", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_reinitialize(self, seqid, iprot, oprot): + args = reinitialize_args() + args.read(iprot) + iprot.readMessageEnd() + try: + 
self._handler.reinitialize() + except TTransport.TTransportException: + raise + except Exception: + logging.exception("Exception in oneway handler") + + def process_shutdown(self, seqid, iprot, oprot): + args = shutdown_args() + args.read(iprot) + iprot.readMessageEnd() + try: + self._handler.shutdown() + except TTransport.TTransportException: + raise + except Exception: + logging.exception("Exception in oneway handler") + + +# HELPER FUNCTIONS AND STRUCTURES + + +class getName_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getName_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getName_args) +getName_args.thrift_spec = () + + +class getName_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getName_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRING, 0) + oprot.writeString(self.success.encode("utf-8") if sys.version_info[0] == 2 else self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getName_result) +getName_result.thrift_spec = ( + ( + 0, + TType.STRING, + "success", + "UTF8", + None, + ), # 0 +) + + +class getVersion_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, 
TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getVersion_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getVersion_args) +getVersion_args.thrift_spec = () + + +class getVersion_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getVersion_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRING, 0) + oprot.writeString(self.success.encode("utf-8") if sys.version_info[0] == 2 else self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getVersion_result) +getVersion_result.thrift_spec = ( + ( + 0, + TType.STRING, + "success", + "UTF8", + None, + ), # 0 +) + + +class getStatus_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getStatus_args") + 
oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getStatus_args) +getStatus_args.thrift_spec = () + + +class getStatus_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.I32: + self.success = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getStatus_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.I32, 0) + oprot.writeI32(self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getStatus_result) +getStatus_result.thrift_spec = ( + ( + 0, + TType.I32, + "success", + None, + None, + ), # 0 +) + + +class getStatusDetails_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getStatusDetails_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getStatusDetails_args) +getStatusDetails_args.thrift_spec = () + + +class getStatusDetails_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, 
TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getStatusDetails_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRING, 0) + oprot.writeString(self.success.encode("utf-8") if sys.version_info[0] == 2 else self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getStatusDetails_result) +getStatusDetails_result.thrift_spec = ( + ( + 0, + TType.STRING, + "success", + "UTF8", + None, + ), # 0 +) + + +class getCounters_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getCounters_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getCounters_args) +getCounters_args.thrift_spec = () + + +class getCounters_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.MAP: + self.success = {} + (_ktype1, _vtype2, _size0) = iprot.readMapBegin() + for _i4 in range(_size0): + _key5 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val6 = 
iprot.readI64() + self.success[_key5] = _val6 + iprot.readMapEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getCounters_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.MAP, 0) + oprot.writeMapBegin(TType.STRING, TType.I64, len(self.success)) + for kiter7, viter8 in self.success.items(): + oprot.writeString(kiter7.encode("utf-8") if sys.version_info[0] == 2 else kiter7) + oprot.writeI64(viter8) + oprot.writeMapEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getCounters_result) +getCounters_result.thrift_spec = ( + ( + 0, + TType.MAP, + "success", + (TType.STRING, "UTF8", TType.I64, None, False), + None, + ), # 0 +) + + +class getCounter_args: + """ + Attributes: + - key + + """ + + def __init__( + self, + key=None, + ): + self.key = key + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.key = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getCounter_args") + if self.key is not None: + oprot.writeFieldBegin("key", TType.STRING, 1) + oprot.writeString(self.key.encode("utf-8") if sys.version_info[0] == 2 else self.key) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getCounter_args) +getCounter_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "key", + "UTF8", + None, + ), # 1 +) + + +class getCounter_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = 
iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.I64: + self.success = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getCounter_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.I64, 0) + oprot.writeI64(self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getCounter_result) +getCounter_result.thrift_spec = ( + ( + 0, + TType.I64, + "success", + None, + None, + ), # 0 +) + + +class setOption_args: + """ + Attributes: + - key + - value + + """ + + def __init__( + self, + key=None, + value=None, + ): + self.key = key + self.value = value + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.key = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.value = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("setOption_args") + if self.key is not None: + oprot.writeFieldBegin("key", TType.STRING, 1) + oprot.writeString(self.key.encode("utf-8") if sys.version_info[0] == 2 else self.key) + oprot.writeFieldEnd() + if self.value is not None: + oprot.writeFieldBegin("value", TType.STRING, 2) + oprot.writeString(self.value.encode("utf-8") if sys.version_info[0] == 2 else self.value) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(setOption_args) +setOption_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "key", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "value", + "UTF8", + None, + ), # 2 +) + + +class setOption_result: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, 
TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("setOption_result") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(setOption_result) +setOption_result.thrift_spec = () + + +class getOption_args: + """ + Attributes: + - key + + """ + + def __init__( + self, + key=None, + ): + self.key = key + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.key = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getOption_args") + if self.key is not None: + oprot.writeFieldBegin("key", TType.STRING, 1) + oprot.writeString(self.key.encode("utf-8") if sys.version_info[0] == 2 else self.key) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getOption_args) +getOption_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "key", + "UTF8", + None, + ), # 1 +) + + +class getOption_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + 
iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getOption_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRING, 0) + oprot.writeString(self.success.encode("utf-8") if sys.version_info[0] == 2 else self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getOption_result) +getOption_result.thrift_spec = ( + ( + 0, + TType.STRING, + "success", + "UTF8", + None, + ), # 0 +) + + +class getOptions_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getOptions_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getOptions_args) +getOptions_args.thrift_spec = () + + +class getOptions_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.MAP: + self.success = {} + (_ktype10, _vtype11, _size9) = iprot.readMapBegin() + for _i13 in range(_size9): + _key14 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val15 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success[_key14] = _val15 + iprot.readMapEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getOptions_result") + if self.success is 
not None: + oprot.writeFieldBegin("success", TType.MAP, 0) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) + for kiter16, viter17 in self.success.items(): + oprot.writeString(kiter16.encode("utf-8") if sys.version_info[0] == 2 else kiter16) + oprot.writeString(viter17.encode("utf-8") if sys.version_info[0] == 2 else viter17) + oprot.writeMapEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getOptions_result) +getOptions_result.thrift_spec = ( + ( + 0, + TType.MAP, + "success", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 0 +) + + +class getCpuProfile_args: + """ + Attributes: + - profileDurationInSec + + """ + + def __init__( + self, + profileDurationInSec=None, + ): + self.profileDurationInSec = profileDurationInSec + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.profileDurationInSec = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getCpuProfile_args") + if self.profileDurationInSec is not None: + oprot.writeFieldBegin("profileDurationInSec", TType.I32, 1) + oprot.writeI32(self.profileDurationInSec) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getCpuProfile_args) +getCpuProfile_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "profileDurationInSec", + None, + None, + ), # 1 +) + + +class getCpuProfile_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, 
oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getCpuProfile_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRING, 0) + oprot.writeString(self.success.encode("utf-8") if sys.version_info[0] == 2 else self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getCpuProfile_result) +getCpuProfile_result.thrift_spec = ( + ( + 0, + TType.STRING, + "success", + "UTF8", + None, + ), # 0 +) + + +class aliveSince_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("aliveSince_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(aliveSince_args) +aliveSince_args.thrift_spec = () + + +class aliveSince_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.I64: + self.success = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("aliveSince_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.I64, 0) + oprot.writeI64(self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) 
and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(aliveSince_result) +aliveSince_result.thrift_spec = ( + ( + 0, + TType.I64, + "success", + None, + None, + ), # 0 +) + + +class reinitialize_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("reinitialize_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(reinitialize_args) +reinitialize_args.thrift_spec = () + + +class shutdown_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("shutdown_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(shutdown_args) +shutdown_args.thrift_spec = () +fix_spec(all_structs) +del all_structs diff --git a/vendor/fb303/__init__.py b/vendor/fb303/__init__.py new file mode 100644 index 0000000000..398041beaf --- /dev/null +++ b/vendor/fb303/__init__.py @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +__all__ = ["ttypes", "constants", "FacebookService"] diff --git a/vendor/fb303/constants.py b/vendor/fb303/constants.py new file mode 100644 index 0000000000..3361fd3904 --- /dev/null +++ b/vendor/fb303/constants.py @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# Autogenerated by Thrift Compiler (0.16.0) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# +# options string: py +# + + + diff --git a/vendor/fb303/ttypes.py b/vendor/fb303/ttypes.py new file mode 100644 index 0000000000..bf2c4e2955 --- /dev/null +++ b/vendor/fb303/ttypes.py @@ -0,0 +1,64 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# Autogenerated by Thrift Compiler (0.16.0) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# +# options string: py +# + + +from thrift.TRecursive import fix_spec + +all_structs = [] + + +class fb_status: + """ + Common status reporting mechanism across all services + + """ + + DEAD = 0 + STARTING = 1 + ALIVE = 2 + STOPPING = 3 + STOPPED = 4 + WARNING = 5 + + _VALUES_TO_NAMES = { + 0: "DEAD", + 1: "STARTING", + 2: "ALIVE", + 3: "STOPPING", + 4: "STOPPED", + 5: "WARNING", + } + + _NAMES_TO_VALUES = { + "DEAD": 0, + "STARTING": 1, + "ALIVE": 2, + "STOPPING": 3, + "STOPPED": 4, + "WARNING": 5, + } + + +fix_spec(all_structs) +del all_structs diff --git a/vendor/hive_metastore/ThriftHiveMetastore.py b/vendor/hive_metastore/ThriftHiveMetastore.py new file mode 100644 index 0000000000..4d25c087c7 --- /dev/null +++ b/vendor/hive_metastore/ThriftHiveMetastore.py @@ -0,0 +1,72960 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# Autogenerated by Thrift Compiler (0.16.0) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# +# options string: py +# + +import logging +import sys + +import fb303.FacebookService +from thrift.Thrift import ( + TApplicationException, + TMessageType, + TProcessor, + TType, +) +from thrift.transport import TTransport +from thrift.TRecursive import fix_spec + +from .ttypes import * + +all_structs = [] + + +class Iface(fb303.FacebookService.Iface): + """ + This interface is live. + + """ + + def getMetaConf(self, key): + """ + Parameters: + - key + + """ + pass + + def setMetaConf(self, key, value): + """ + Parameters: + - key + - value + + """ + pass + + def create_catalog(self, catalog): + """ + Parameters: + - catalog + + """ + pass + + def alter_catalog(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def get_catalog(self, catName): + """ + Parameters: + - catName + + """ + pass + + def get_catalogs(self): + pass + + def drop_catalog(self, catName): + """ + Parameters: + - catName + + """ + pass + + def create_database(self, database): + """ + Parameters: + - database + + """ + pass + + def get_database(self, name): + """ + Parameters: + - name + + """ + pass + + def get_database_req(self, request): + """ + Parameters: + - request + + """ + pass + + def drop_database(self, name, deleteData, cascade): + """ + Parameters: + - name + - deleteData + - cascade + + """ + pass + + def drop_database_req(self, req): + """ + Parameters: + - req + + """ + pass + + def get_databases(self, pattern): + """ + Parameters: + - pattern + + """ + pass + + def get_all_databases(self): + pass + + def alter_database(self, dbname, db): + """ + Parameters: + - dbname + - db + + """ + pass + + def create_dataconnector(self, connector): + """ + Parameters: + - connector + + """ + pass + + def get_dataconnector_req(self, request): + """ + Parameters: + - request + + """ + pass + + def drop_dataconnector(self, name, ifNotExists, checkReferences): + """ + Parameters: + - name + - ifNotExists + - checkReferences + + """ + pass + + def get_dataconnectors(self): + pass + + def alter_dataconnector(self, name, connector): + """ + Parameters: + - name + - connector + + """ + pass + + def get_type(self, name): + """ + Parameters: + - name + + """ + pass + + def create_type(self, type): + """ + Parameters: + - type + + """ + pass + + def drop_type(self, type): + """ + Parameters: + - type + + """ + pass + + def get_type_all(self, name): + """ + Parameters: + - name + + """ + pass + + def get_fields(self, db_name, table_name): + """ + Parameters: + - db_name + - table_name + + """ + pass + + def get_fields_with_environment_context(self, db_name, table_name, environment_context): + """ + Parameters: + - db_name + - table_name + - environment_context + + """ + pass + + def get_fields_req(self, req): + """ + Parameters: + - req + + """ + pass + + def get_schema(self, db_name, table_name): + """ + Parameters: + - db_name + - table_name + + """ + pass + + def get_schema_with_environment_context(self, db_name, table_name, environment_context): + """ + Parameters: + - db_name + - 
table_name + - environment_context + + """ + pass + + def get_schema_req(self, req): + """ + Parameters: + - req + + """ + pass + + def create_table(self, tbl): + """ + Parameters: + - tbl + + """ + pass + + def create_table_with_environment_context(self, tbl, environment_context): + """ + Parameters: + - tbl + - environment_context + + """ + pass + + def create_table_with_constraints( + self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints + ): + """ + Parameters: + - tbl + - primaryKeys + - foreignKeys + - uniqueConstraints + - notNullConstraints + - defaultConstraints + - checkConstraints + + """ + pass + + def create_table_req(self, request): + """ + Parameters: + - request + + """ + pass + + def drop_constraint(self, req): + """ + Parameters: + - req + + """ + pass + + def add_primary_key(self, req): + """ + Parameters: + - req + + """ + pass + + def add_foreign_key(self, req): + """ + Parameters: + - req + + """ + pass + + def add_unique_constraint(self, req): + """ + Parameters: + - req + + """ + pass + + def add_not_null_constraint(self, req): + """ + Parameters: + - req + + """ + pass + + def add_default_constraint(self, req): + """ + Parameters: + - req + + """ + pass + + def add_check_constraint(self, req): + """ + Parameters: + - req + + """ + pass + + def translate_table_dryrun(self, request): + """ + Parameters: + - request + + """ + pass + + def drop_table(self, dbname, name, deleteData): + """ + Parameters: + - dbname + - name + - deleteData + + """ + pass + + def drop_table_with_environment_context(self, dbname, name, deleteData, environment_context): + """ + Parameters: + - dbname + - name + - deleteData + - environment_context + + """ + pass + + def truncate_table(self, dbName, tableName, partNames): + """ + Parameters: + - dbName + - tableName + - partNames + + """ + pass + + def truncate_table_req(self, req): + """ + Parameters: + - req + + """ + pass + + def get_tables(self, db_name, pattern): + """ + Parameters: + - db_name + - pattern + + """ + pass + + def get_tables_by_type(self, db_name, pattern, tableType): + """ + Parameters: + - db_name + - pattern + - tableType + + """ + pass + + def get_all_materialized_view_objects_for_rewriting(self): + pass + + def get_materialized_views_for_rewriting(self, db_name): + """ + Parameters: + - db_name + + """ + pass + + def get_table_meta(self, db_patterns, tbl_patterns, tbl_types): + """ + Parameters: + - db_patterns + - tbl_patterns + - tbl_types + + """ + pass + + def get_all_tables(self, db_name): + """ + Parameters: + - db_name + + """ + pass + + def get_table(self, dbname, tbl_name): + """ + Parameters: + - dbname + - tbl_name + + """ + pass + + def get_table_objects_by_name(self, dbname, tbl_names): + """ + Parameters: + - dbname + - tbl_names + + """ + pass + + def get_tables_ext(self, req): + """ + Parameters: + - req + + """ + pass + + def get_table_req(self, req): + """ + Parameters: + - req + + """ + pass + + def get_table_objects_by_name_req(self, req): + """ + Parameters: + - req + + """ + pass + + def get_materialization_invalidation_info(self, creation_metadata, validTxnList): + """ + Parameters: + - creation_metadata + - validTxnList + + """ + pass + + def update_creation_metadata(self, catName, dbname, tbl_name, creation_metadata): + """ + Parameters: + - catName + - dbname + - tbl_name + - creation_metadata + + """ + pass + + def get_table_names_by_filter(self, dbname, filter, max_tables): + """ + Parameters: + - dbname + - filter + - max_tables + 
+ """ + pass + + def alter_table(self, dbname, tbl_name, new_tbl): + """ + Parameters: + - dbname + - tbl_name + - new_tbl + + """ + pass + + def alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context): + """ + Parameters: + - dbname + - tbl_name + - new_tbl + - environment_context + + """ + pass + + def alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade): + """ + Parameters: + - dbname + - tbl_name + - new_tbl + - cascade + + """ + pass + + def alter_table_req(self, req): + """ + Parameters: + - req + + """ + pass + + def add_partition(self, new_part): + """ + Parameters: + - new_part + + """ + pass + + def add_partition_with_environment_context(self, new_part, environment_context): + """ + Parameters: + - new_part + - environment_context + + """ + pass + + def add_partitions(self, new_parts): + """ + Parameters: + - new_parts + + """ + pass + + def add_partitions_pspec(self, new_parts): + """ + Parameters: + - new_parts + + """ + pass + + def append_partition(self, db_name, tbl_name, part_vals): + """ + Parameters: + - db_name + - tbl_name + - part_vals + + """ + pass + + def add_partitions_req(self, request): + """ + Parameters: + - request + + """ + pass + + def append_partition_with_environment_context(self, db_name, tbl_name, part_vals, environment_context): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - environment_context + + """ + pass + + def append_partition_by_name(self, db_name, tbl_name, part_name): + """ + Parameters: + - db_name + - tbl_name + - part_name + + """ + pass + + def append_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, environment_context): + """ + Parameters: + - db_name + - tbl_name + - part_name + - environment_context + + """ + pass + + def drop_partition(self, db_name, tbl_name, part_vals, deleteData): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - deleteData + + """ + pass + + def drop_partition_with_environment_context(self, db_name, tbl_name, part_vals, deleteData, environment_context): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - deleteData + - environment_context + + """ + pass + + def drop_partition_by_name(self, db_name, tbl_name, part_name, deleteData): + """ + Parameters: + - db_name + - tbl_name + - part_name + - deleteData + + """ + pass + + def drop_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, deleteData, environment_context): + """ + Parameters: + - db_name + - tbl_name + - part_name + - deleteData + - environment_context + + """ + pass + + def drop_partitions_req(self, req): + """ + Parameters: + - req + + """ + pass + + def get_partition(self, db_name, tbl_name, part_vals): + """ + Parameters: + - db_name + - tbl_name + - part_vals + + """ + pass + + def get_partition_req(self, req): + """ + Parameters: + - req + + """ + pass + + def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name): + """ + Parameters: + - partitionSpecs + - source_db + - source_table_name + - dest_db + - dest_table_name + + """ + pass + + def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name): + """ + Parameters: + - partitionSpecs + - source_db + - source_table_name + - dest_db + - dest_table_name + + """ + pass + + def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - user_name + - group_names + + """ + pass + + def 
get_partition_by_name(self, db_name, tbl_name, part_name): + """ + Parameters: + - db_name + - tbl_name + - part_name + + """ + pass + + def get_partitions(self, db_name, tbl_name, max_parts): + """ + Parameters: + - db_name + - tbl_name + - max_parts + + """ + pass + + def get_partitions_req(self, req): + """ + Parameters: + - req + + """ + pass + + def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names): + """ + Parameters: + - db_name + - tbl_name + - max_parts + - user_name + - group_names + + """ + pass + + def get_partitions_pspec(self, db_name, tbl_name, max_parts): + """ + Parameters: + - db_name + - tbl_name + - max_parts + + """ + pass + + def get_partition_names(self, db_name, tbl_name, max_parts): + """ + Parameters: + - db_name + - tbl_name + - max_parts + + """ + pass + + def get_partition_values(self, request): + """ + Parameters: + - request + + """ + pass + + def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - max_parts + + """ + pass + + def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - max_parts + - user_name + - group_names + + """ + pass + + def get_partitions_ps_with_auth_req(self, req): + """ + Parameters: + - req + + """ + pass + + def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - max_parts + + """ + pass + + def get_partition_names_ps_req(self, req): + """ + Parameters: + - req + + """ + pass + + def get_partition_names_req(self, req): + """ + Parameters: + - req + + """ + pass + + def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts): + """ + Parameters: + - db_name + - tbl_name + - filter + - max_parts + + """ + pass + + def get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts): + """ + Parameters: + - db_name + - tbl_name + - filter + - max_parts + + """ + pass + + def get_partitions_by_expr(self, req): + """ + Parameters: + - req + + """ + pass + + def get_partitions_spec_by_expr(self, req): + """ + Parameters: + - req + + """ + pass + + def get_num_partitions_by_filter(self, db_name, tbl_name, filter): + """ + Parameters: + - db_name + - tbl_name + - filter + + """ + pass + + def get_partitions_by_names(self, db_name, tbl_name, names): + """ + Parameters: + - db_name + - tbl_name + - names + + """ + pass + + def get_partitions_by_names_req(self, req): + """ + Parameters: + - req + + """ + pass + + def alter_partition(self, db_name, tbl_name, new_part): + """ + Parameters: + - db_name + - tbl_name + - new_part + + """ + pass + + def alter_partitions(self, db_name, tbl_name, new_parts): + """ + Parameters: + - db_name + - tbl_name + - new_parts + + """ + pass + + def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context): + """ + Parameters: + - db_name + - tbl_name + - new_parts + - environment_context + + """ + pass + + def alter_partitions_req(self, req): + """ + Parameters: + - req + + """ + pass + + def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context): + """ + Parameters: + - db_name + - tbl_name + - new_part + - environment_context + + """ + pass + + def rename_partition(self, db_name, tbl_name, part_vals, new_part): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - new_part + + """ + pass + + def 
rename_partition_req(self, req): + """ + Parameters: + - req + + """ + pass + + def partition_name_has_valid_characters(self, part_vals, throw_exception): + """ + Parameters: + - part_vals + - throw_exception + + """ + pass + + def get_config_value(self, name, defaultValue): + """ + Parameters: + - name + - defaultValue + + """ + pass + + def partition_name_to_vals(self, part_name): + """ + Parameters: + - part_name + + """ + pass + + def partition_name_to_spec(self, part_name): + """ + Parameters: + - part_name + + """ + pass + + def markPartitionForEvent(self, db_name, tbl_name, part_vals, eventType): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - eventType + + """ + pass + + def isPartitionMarkedForEvent(self, db_name, tbl_name, part_vals, eventType): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - eventType + + """ + pass + + def get_primary_keys(self, request): + """ + Parameters: + - request + + """ + pass + + def get_foreign_keys(self, request): + """ + Parameters: + - request + + """ + pass + + def get_unique_constraints(self, request): + """ + Parameters: + - request + + """ + pass + + def get_not_null_constraints(self, request): + """ + Parameters: + - request + + """ + pass + + def get_default_constraints(self, request): + """ + Parameters: + - request + + """ + pass + + def get_check_constraints(self, request): + """ + Parameters: + - request + + """ + pass + + def get_all_table_constraints(self, request): + """ + Parameters: + - request + + """ + pass + + def update_table_column_statistics(self, stats_obj): + """ + Parameters: + - stats_obj + + """ + pass + + def update_partition_column_statistics(self, stats_obj): + """ + Parameters: + - stats_obj + + """ + pass + + def update_table_column_statistics_req(self, req): + """ + Parameters: + - req + + """ + pass + + def update_partition_column_statistics_req(self, req): + """ + Parameters: + - req + + """ + pass + + def update_transaction_statistics(self, req): + """ + Parameters: + - req + + """ + pass + + def get_table_column_statistics(self, db_name, tbl_name, col_name): + """ + Parameters: + - db_name + - tbl_name + - col_name + + """ + pass + + def get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): + """ + Parameters: + - db_name + - tbl_name + - part_name + - col_name + + """ + pass + + def get_table_statistics_req(self, request): + """ + Parameters: + - request + + """ + pass + + def get_partitions_statistics_req(self, request): + """ + Parameters: + - request + + """ + pass + + def get_aggr_stats_for(self, request): + """ + Parameters: + - request + + """ + pass + + def set_aggr_stats_for(self, request): + """ + Parameters: + - request + + """ + pass + + def delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name, engine): + """ + Parameters: + - db_name + - tbl_name + - part_name + - col_name + - engine + + """ + pass + + def delete_table_column_statistics(self, db_name, tbl_name, col_name, engine): + """ + Parameters: + - db_name + - tbl_name + - col_name + - engine + + """ + pass + + def create_function(self, func): + """ + Parameters: + - func + + """ + pass + + def drop_function(self, dbName, funcName): + """ + Parameters: + - dbName + - funcName + + """ + pass + + def alter_function(self, dbName, funcName, newFunc): + """ + Parameters: + - dbName + - funcName + - newFunc + + """ + pass + + def get_functions(self, dbName, pattern): + """ + Parameters: + - dbName + - pattern + + """ + pass + + def get_function(self, dbName, funcName): + 
""" + Parameters: + - dbName + - funcName + + """ + pass + + def get_all_functions(self): + pass + + def create_role(self, role): + """ + Parameters: + - role + + """ + pass + + def drop_role(self, role_name): + """ + Parameters: + - role_name + + """ + pass + + def get_role_names(self): + pass + + def grant_role(self, role_name, principal_name, principal_type, grantor, grantorType, grant_option): + """ + Parameters: + - role_name + - principal_name + - principal_type + - grantor + - grantorType + - grant_option + + """ + pass + + def revoke_role(self, role_name, principal_name, principal_type): + """ + Parameters: + - role_name + - principal_name + - principal_type + + """ + pass + + def list_roles(self, principal_name, principal_type): + """ + Parameters: + - principal_name + - principal_type + + """ + pass + + def grant_revoke_role(self, request): + """ + Parameters: + - request + + """ + pass + + def get_principals_in_role(self, request): + """ + Parameters: + - request + + """ + pass + + def get_role_grants_for_principal(self, request): + """ + Parameters: + - request + + """ + pass + + def get_privilege_set(self, hiveObject, user_name, group_names): + """ + Parameters: + - hiveObject + - user_name + - group_names + + """ + pass + + def list_privileges(self, principal_name, principal_type, hiveObject): + """ + Parameters: + - principal_name + - principal_type + - hiveObject + + """ + pass + + def grant_privileges(self, privileges): + """ + Parameters: + - privileges + + """ + pass + + def revoke_privileges(self, privileges): + """ + Parameters: + - privileges + + """ + pass + + def grant_revoke_privileges(self, request): + """ + Parameters: + - request + + """ + pass + + def refresh_privileges(self, objToRefresh, authorizer, grantRequest): + """ + Parameters: + - objToRefresh + - authorizer + - grantRequest + + """ + pass + + def set_ugi(self, user_name, group_names): + """ + Parameters: + - user_name + - group_names + + """ + pass + + def get_delegation_token(self, token_owner, renewer_kerberos_principal_name): + """ + Parameters: + - token_owner + - renewer_kerberos_principal_name + + """ + pass + + def renew_delegation_token(self, token_str_form): + """ + Parameters: + - token_str_form + + """ + pass + + def cancel_delegation_token(self, token_str_form): + """ + Parameters: + - token_str_form + + """ + pass + + def add_token(self, token_identifier, delegation_token): + """ + Parameters: + - token_identifier + - delegation_token + + """ + pass + + def remove_token(self, token_identifier): + """ + Parameters: + - token_identifier + + """ + pass + + def get_token(self, token_identifier): + """ + Parameters: + - token_identifier + + """ + pass + + def get_all_token_identifiers(self): + pass + + def add_master_key(self, key): + """ + Parameters: + - key + + """ + pass + + def update_master_key(self, seq_number, key): + """ + Parameters: + - seq_number + - key + + """ + pass + + def remove_master_key(self, key_seq): + """ + Parameters: + - key_seq + + """ + pass + + def get_master_keys(self): + pass + + def get_open_txns(self): + pass + + def get_open_txns_info(self): + pass + + def open_txns(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def abort_txn(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def abort_txns(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def commit_txn(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def get_latest_txnid_in_conflict(self, txnId): + """ + Parameters: + - txnId + + """ + pass + + def 
repl_tbl_writeid_state(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def get_valid_write_ids(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def allocate_table_write_ids(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def get_max_allocated_table_write_id(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def seed_write_id(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def seed_txn_id(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def lock(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def check_lock(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def unlock(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def show_locks(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def heartbeat(self, ids): + """ + Parameters: + - ids + + """ + pass + + def heartbeat_txn_range(self, txns): + """ + Parameters: + - txns + + """ + pass + + def compact(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def compact2(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def show_compact(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def add_dynamic_partitions(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def find_next_compact(self, workerId): + """ + Parameters: + - workerId + + """ + pass + + def find_next_compact2(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def update_compactor_state(self, cr, txn_id): + """ + Parameters: + - cr + - txn_id + + """ + pass + + def find_columns_with_stats(self, cr): + """ + Parameters: + - cr + + """ + pass + + def mark_cleaned(self, cr): + """ + Parameters: + - cr + + """ + pass + + def mark_compacted(self, cr): + """ + Parameters: + - cr + + """ + pass + + def mark_failed(self, cr): + """ + Parameters: + - cr + + """ + pass + + def mark_refused(self, cr): + """ + Parameters: + - cr + + """ + pass + + def update_compaction_metrics_data(self, data): + """ + Parameters: + - data + + """ + pass + + def remove_compaction_metrics_data(self, request): + """ + Parameters: + - request + + """ + pass + + def set_hadoop_jobid(self, jobId, cq_id): + """ + Parameters: + - jobId + - cq_id + + """ + pass + + def get_latest_committed_compaction_info(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def get_next_notification(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def get_current_notificationEventId(self): + pass + + def get_notification_events_count(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def fire_listener_event(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def flushCache(self): + pass + + def add_write_notification_log(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def add_write_notification_log_in_batch(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def cm_recycle(self, request): + """ + Parameters: + - request + + """ + pass + + def get_file_metadata_by_expr(self, req): + """ + Parameters: + - req + + """ + pass + + def get_file_metadata(self, req): + """ + Parameters: + - req + + """ + pass + + def put_file_metadata(self, req): + """ + Parameters: + - req + + """ + pass + + def clear_file_metadata(self, req): + """ + Parameters: + - req + + """ + pass + + def cache_file_metadata(self, req): + """ + Parameters: + - req + + """ + pass + + def get_metastore_db_uuid(self): + pass + + def create_resource_plan(self, request): + """ + Parameters: + - request + + """ + pass + + def get_resource_plan(self, 
request): + """ + Parameters: + - request + + """ + pass + + def get_active_resource_plan(self, request): + """ + Parameters: + - request + + """ + pass + + def get_all_resource_plans(self, request): + """ + Parameters: + - request + + """ + pass + + def alter_resource_plan(self, request): + """ + Parameters: + - request + + """ + pass + + def validate_resource_plan(self, request): + """ + Parameters: + - request + + """ + pass + + def drop_resource_plan(self, request): + """ + Parameters: + - request + + """ + pass + + def create_wm_trigger(self, request): + """ + Parameters: + - request + + """ + pass + + def alter_wm_trigger(self, request): + """ + Parameters: + - request + + """ + pass + + def drop_wm_trigger(self, request): + """ + Parameters: + - request + + """ + pass + + def get_triggers_for_resourceplan(self, request): + """ + Parameters: + - request + + """ + pass + + def create_wm_pool(self, request): + """ + Parameters: + - request + + """ + pass + + def alter_wm_pool(self, request): + """ + Parameters: + - request + + """ + pass + + def drop_wm_pool(self, request): + """ + Parameters: + - request + + """ + pass + + def create_or_update_wm_mapping(self, request): + """ + Parameters: + - request + + """ + pass + + def drop_wm_mapping(self, request): + """ + Parameters: + - request + + """ + pass + + def create_or_drop_wm_trigger_to_pool_mapping(self, request): + """ + Parameters: + - request + + """ + pass + + def create_ischema(self, schema): + """ + Parameters: + - schema + + """ + pass + + def alter_ischema(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def get_ischema(self, name): + """ + Parameters: + - name + + """ + pass + + def drop_ischema(self, name): + """ + Parameters: + - name + + """ + pass + + def add_schema_version(self, schemaVersion): + """ + Parameters: + - schemaVersion + + """ + pass + + def get_schema_version(self, schemaVersion): + """ + Parameters: + - schemaVersion + + """ + pass + + def get_schema_latest_version(self, schemaName): + """ + Parameters: + - schemaName + + """ + pass + + def get_schema_all_versions(self, schemaName): + """ + Parameters: + - schemaName + + """ + pass + + def drop_schema_version(self, schemaVersion): + """ + Parameters: + - schemaVersion + + """ + pass + + def get_schemas_by_cols(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def map_schema_version_to_serde(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def set_schema_version_state(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def add_serde(self, serde): + """ + Parameters: + - serde + + """ + pass + + def get_serde(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def get_lock_materialization_rebuild(self, dbName, tableName, txnId): + """ + Parameters: + - dbName + - tableName + - txnId + + """ + pass + + def heartbeat_lock_materialization_rebuild(self, dbName, tableName, txnId): + """ + Parameters: + - dbName + - tableName + - txnId + + """ + pass + + def add_runtime_stats(self, stat): + """ + Parameters: + - stat + + """ + pass + + def get_runtime_stats(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def get_partitions_with_specs(self, request): + """ + Parameters: + - request + + """ + pass + + def scheduled_query_poll(self, request): + """ + Parameters: + - request + + """ + pass + + def scheduled_query_maintenance(self, request): + """ + Parameters: + - request + + """ + pass + + def scheduled_query_progress(self, info): + """ + Parameters: + - info + + """ + pass + + def 
get_scheduled_query(self, scheduleKey): + """ + Parameters: + - scheduleKey + + """ + pass + + def add_replication_metrics(self, replicationMetricList): + """ + Parameters: + - replicationMetricList + + """ + pass + + def get_replication_metrics(self, rqst): + """ + Parameters: + - rqst + + """ + pass + + def get_open_txns_req(self, getOpenTxnsRequest): + """ + Parameters: + - getOpenTxnsRequest + + """ + pass + + def create_stored_procedure(self, proc): + """ + Parameters: + - proc + + """ + pass + + def get_stored_procedure(self, request): + """ + Parameters: + - request + + """ + pass + + def drop_stored_procedure(self, request): + """ + Parameters: + - request + + """ + pass + + def get_all_stored_procedures(self, request): + """ + Parameters: + - request + + """ + pass + + def find_package(self, request): + """ + Parameters: + - request + + """ + pass + + def add_package(self, request): + """ + Parameters: + - request + + """ + pass + + def get_all_packages(self, request): + """ + Parameters: + - request + + """ + pass + + def drop_package(self, request): + """ + Parameters: + - request + + """ + pass + + def get_all_write_event_info(self, request): + """ + Parameters: + - request + + """ + pass + + +class Client(fb303.FacebookService.Client, Iface): + """ + This interface is live. + + """ + + def __init__(self, iprot, oprot=None): + fb303.FacebookService.Client.__init__(self, iprot, oprot) + + def getMetaConf(self, key): + """ + Parameters: + - key + + """ + self.send_getMetaConf(key) + return self.recv_getMetaConf() + + def send_getMetaConf(self, key): + self._oprot.writeMessageBegin("getMetaConf", TMessageType.CALL, self._seqid) + args = getMetaConf_args() + args.key = key + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getMetaConf(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getMetaConf_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "getMetaConf failed: unknown result") + + def setMetaConf(self, key, value): + """ + Parameters: + - key + - value + + """ + self.send_setMetaConf(key, value) + self.recv_setMetaConf() + + def send_setMetaConf(self, key, value): + self._oprot.writeMessageBegin("setMetaConf", TMessageType.CALL, self._seqid) + args = setMetaConf_args() + args.key = key + args.value = value + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_setMetaConf(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = setMetaConf_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def create_catalog(self, catalog): + """ + Parameters: + - catalog + + """ + self.send_create_catalog(catalog) + self.recv_create_catalog() + + def send_create_catalog(self, catalog): + self._oprot.writeMessageBegin("create_catalog", TMessageType.CALL, self._seqid) + args = create_catalog_args() + args.catalog = catalog + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_catalog(self): + iprot = 
self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_catalog_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def alter_catalog(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_alter_catalog(rqst) + self.recv_alter_catalog() + + def send_alter_catalog(self, rqst): + self._oprot.writeMessageBegin("alter_catalog", TMessageType.CALL, self._seqid) + args = alter_catalog_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_catalog(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_catalog_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def get_catalog(self, catName): + """ + Parameters: + - catName + + """ + self.send_get_catalog(catName) + return self.recv_get_catalog() + + def send_get_catalog(self, catName): + self._oprot.writeMessageBegin("get_catalog", TMessageType.CALL, self._seqid) + args = get_catalog_args() + args.catName = catName + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_catalog(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_catalog_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_catalog failed: unknown result") + + def get_catalogs(self): + self.send_get_catalogs() + return self.recv_get_catalogs() + + def send_get_catalogs(self): + self._oprot.writeMessageBegin("get_catalogs", TMessageType.CALL, self._seqid) + args = get_catalogs_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_catalogs(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_catalogs_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_catalogs failed: unknown result") + + def drop_catalog(self, catName): + """ + Parameters: + - catName + + """ + self.send_drop_catalog(catName) + self.recv_drop_catalog() + + def send_drop_catalog(self, catName): + self._oprot.writeMessageBegin("drop_catalog", TMessageType.CALL, self._seqid) + args = drop_catalog_args() + args.catName = catName + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_catalog(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + 
if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_catalog_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def create_database(self, database): + """ + Parameters: + - database + + """ + self.send_create_database(database) + self.recv_create_database() + + def send_create_database(self, database): + self._oprot.writeMessageBegin("create_database", TMessageType.CALL, self._seqid) + args = create_database_args() + args.database = database + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_database(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_database_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def get_database(self, name): + """ + Parameters: + - name + + """ + self.send_get_database(name) + return self.recv_get_database() + + def send_get_database(self, name): + self._oprot.writeMessageBegin("get_database", TMessageType.CALL, self._seqid) + args = get_database_args() + args.name = name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_database(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_database_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_database failed: unknown result") + + def get_database_req(self, request): + """ + Parameters: + - request + + """ + self.send_get_database_req(request) + return self.recv_get_database_req() + + def send_get_database_req(self, request): + self._oprot.writeMessageBegin("get_database_req", TMessageType.CALL, self._seqid) + args = get_database_req_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_database_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_database_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_database_req failed: unknown result") + + def drop_database(self, name, deleteData, cascade): + """ + Parameters: + - name + - deleteData + - cascade + + """ + self.send_drop_database(name, deleteData, cascade) + self.recv_drop_database() + + def send_drop_database(self, name, deleteData, cascade): + self._oprot.writeMessageBegin("drop_database", TMessageType.CALL, self._seqid) + args = drop_database_args() + 
args.name = name + args.deleteData = deleteData + args.cascade = cascade + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_database(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_database_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def drop_database_req(self, req): + """ + Parameters: + - req + + """ + self.send_drop_database_req(req) + self.recv_drop_database_req() + + def send_drop_database_req(self, req): + self._oprot.writeMessageBegin("drop_database_req", TMessageType.CALL, self._seqid) + args = drop_database_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_database_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_database_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def get_databases(self, pattern): + """ + Parameters: + - pattern + + """ + self.send_get_databases(pattern) + return self.recv_get_databases() + + def send_get_databases(self, pattern): + self._oprot.writeMessageBegin("get_databases", TMessageType.CALL, self._seqid) + args = get_databases_args() + args.pattern = pattern + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_databases(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_databases_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_databases failed: unknown result") + + def get_all_databases(self): + self.send_get_all_databases() + return self.recv_get_all_databases() + + def send_get_all_databases(self): + self._oprot.writeMessageBegin("get_all_databases", TMessageType.CALL, self._seqid) + args = get_all_databases_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_all_databases(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_all_databases_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_databases failed: unknown result") + + def alter_database(self, dbname, db): + """ + Parameters: + - dbname + - db + + """ + self.send_alter_database(dbname, db) + self.recv_alter_database() + + def send_alter_database(self, dbname, db): + self._oprot.writeMessageBegin("alter_database", 
TMessageType.CALL, self._seqid) + args = alter_database_args() + args.dbname = dbname + args.db = db + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_database(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_database_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def create_dataconnector(self, connector): + """ + Parameters: + - connector + + """ + self.send_create_dataconnector(connector) + self.recv_create_dataconnector() + + def send_create_dataconnector(self, connector): + self._oprot.writeMessageBegin("create_dataconnector", TMessageType.CALL, self._seqid) + args = create_dataconnector_args() + args.connector = connector + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_dataconnector(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_dataconnector_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def get_dataconnector_req(self, request): + """ + Parameters: + - request + + """ + self.send_get_dataconnector_req(request) + return self.recv_get_dataconnector_req() + + def send_get_dataconnector_req(self, request): + self._oprot.writeMessageBegin("get_dataconnector_req", TMessageType.CALL, self._seqid) + args = get_dataconnector_req_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_dataconnector_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_dataconnector_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_dataconnector_req failed: unknown result") + + def drop_dataconnector(self, name, ifNotExists, checkReferences): + """ + Parameters: + - name + - ifNotExists + - checkReferences + + """ + self.send_drop_dataconnector(name, ifNotExists, checkReferences) + self.recv_drop_dataconnector() + + def send_drop_dataconnector(self, name, ifNotExists, checkReferences): + self._oprot.writeMessageBegin("drop_dataconnector", TMessageType.CALL, self._seqid) + args = drop_dataconnector_args() + args.name = name + args.ifNotExists = ifNotExists + args.checkReferences = checkReferences + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_dataconnector(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_dataconnector_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 
+ if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def get_dataconnectors(self): + self.send_get_dataconnectors() + return self.recv_get_dataconnectors() + + def send_get_dataconnectors(self): + self._oprot.writeMessageBegin("get_dataconnectors", TMessageType.CALL, self._seqid) + args = get_dataconnectors_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_dataconnectors(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_dataconnectors_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_dataconnectors failed: unknown result") + + def alter_dataconnector(self, name, connector): + """ + Parameters: + - name + - connector + + """ + self.send_alter_dataconnector(name, connector) + self.recv_alter_dataconnector() + + def send_alter_dataconnector(self, name, connector): + self._oprot.writeMessageBegin("alter_dataconnector", TMessageType.CALL, self._seqid) + args = alter_dataconnector_args() + args.name = name + args.connector = connector + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_dataconnector(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_dataconnector_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def get_type(self, name): + """ + Parameters: + - name + + """ + self.send_get_type(name) + return self.recv_get_type() + + def send_get_type(self, name): + self._oprot.writeMessageBegin("get_type", TMessageType.CALL, self._seqid) + args = get_type_args() + args.name = name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_type(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_type_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_type failed: unknown result") + + def create_type(self, type): + """ + Parameters: + - type + + """ + self.send_create_type(type) + return self.recv_create_type() + + def send_create_type(self, type): + self._oprot.writeMessageBegin("create_type", TMessageType.CALL, self._seqid) + args = create_type_args() + args.type = type + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_type(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_type_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return 
result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "create_type failed: unknown result") + + def drop_type(self, type): + """ + Parameters: + - type + + """ + self.send_drop_type(type) + return self.recv_drop_type() + + def send_drop_type(self, type): + self._oprot.writeMessageBegin("drop_type", TMessageType.CALL, self._seqid) + args = drop_type_args() + args.type = type + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_type(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_type_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_type failed: unknown result") + + def get_type_all(self, name): + """ + Parameters: + - name + + """ + self.send_get_type_all(name) + return self.recv_get_type_all() + + def send_get_type_all(self, name): + self._oprot.writeMessageBegin("get_type_all", TMessageType.CALL, self._seqid) + args = get_type_all_args() + args.name = name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_type_all(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_type_all_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_type_all failed: unknown result") + + def get_fields(self, db_name, table_name): + """ + Parameters: + - db_name + - table_name + + """ + self.send_get_fields(db_name, table_name) + return self.recv_get_fields() + + def send_get_fields(self, db_name, table_name): + self._oprot.writeMessageBegin("get_fields", TMessageType.CALL, self._seqid) + args = get_fields_args() + args.db_name = db_name + args.table_name = table_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_fields(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_fields_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_fields failed: unknown result") + + def get_fields_with_environment_context(self, db_name, table_name, environment_context): + """ + Parameters: + - db_name + - table_name + - environment_context + + """ + self.send_get_fields_with_environment_context(db_name, table_name, environment_context) + return self.recv_get_fields_with_environment_context() + + def send_get_fields_with_environment_context(self, db_name, 
table_name, environment_context): + self._oprot.writeMessageBegin("get_fields_with_environment_context", TMessageType.CALL, self._seqid) + args = get_fields_with_environment_context_args() + args.db_name = db_name + args.table_name = table_name + args.environment_context = environment_context + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_fields_with_environment_context(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_fields_with_environment_context_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException( + TApplicationException.MISSING_RESULT, "get_fields_with_environment_context failed: unknown result" + ) + + def get_fields_req(self, req): + """ + Parameters: + - req + + """ + self.send_get_fields_req(req) + return self.recv_get_fields_req() + + def send_get_fields_req(self, req): + self._oprot.writeMessageBegin("get_fields_req", TMessageType.CALL, self._seqid) + args = get_fields_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_fields_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_fields_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_fields_req failed: unknown result") + + def get_schema(self, db_name, table_name): + """ + Parameters: + - db_name + - table_name + + """ + self.send_get_schema(db_name, table_name) + return self.recv_get_schema() + + def send_get_schema(self, db_name, table_name): + self._oprot.writeMessageBegin("get_schema", TMessageType.CALL, self._seqid) + args = get_schema_args() + args.db_name = db_name + args.table_name = table_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_schema(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_schema_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema failed: unknown result") + + def get_schema_with_environment_context(self, db_name, table_name, environment_context): + """ + Parameters: + - db_name + - table_name + - environment_context + + """ + self.send_get_schema_with_environment_context(db_name, table_name, environment_context) + return self.recv_get_schema_with_environment_context() + + def send_get_schema_with_environment_context(self, db_name, table_name, 
+        self._oprot.writeMessageBegin("get_schema_with_environment_context", TMessageType.CALL, self._seqid)
+        args = get_schema_with_environment_context_args()
+        args.db_name = db_name
+        args.table_name = table_name
+        args.environment_context = environment_context
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_schema_with_environment_context(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_schema_with_environment_context_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(
+            TApplicationException.MISSING_RESULT, "get_schema_with_environment_context failed: unknown result"
+        )
+
+    def get_schema_req(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_get_schema_req(req)
+        return self.recv_get_schema_req()
+
+    def send_get_schema_req(self, req):
+        self._oprot.writeMessageBegin("get_schema_req", TMessageType.CALL, self._seqid)
+        args = get_schema_req_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_schema_req(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_schema_req_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema_req failed: unknown result")
+
+    def create_table(self, tbl):
+        """
+        Parameters:
+         - tbl
+
+        """
+        self.send_create_table(tbl)
+        self.recv_create_table()
+
+    def send_create_table(self, tbl):
+        self._oprot.writeMessageBegin("create_table", TMessageType.CALL, self._seqid)
+        args = create_table_args()
+        args.tbl = tbl
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_create_table(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = create_table_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        if result.o4 is not None:
+            raise result.o4
+        return
+
+    def create_table_with_environment_context(self, tbl, environment_context):
+        """
+        Parameters:
+         - tbl
+         - environment_context
+
+        """
+        self.send_create_table_with_environment_context(tbl, environment_context)
+        self.recv_create_table_with_environment_context()
+
+    def send_create_table_with_environment_context(self, tbl, environment_context):
+        self._oprot.writeMessageBegin("create_table_with_environment_context", TMessageType.CALL, self._seqid)
+        args = create_table_with_environment_context_args()
+        args.tbl = tbl
+        args.environment_context = environment_context
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_create_table_with_environment_context(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = create_table_with_environment_context_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        if result.o4 is not None:
+            raise result.o4
+        return
+
+    def create_table_with_constraints(
+        self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints
+    ):
+        """
+        Parameters:
+         - tbl
+         - primaryKeys
+         - foreignKeys
+         - uniqueConstraints
+         - notNullConstraints
+         - defaultConstraints
+         - checkConstraints
+
+        """
+        self.send_create_table_with_constraints(
+            tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints
+        )
+        self.recv_create_table_with_constraints()
+
+    def send_create_table_with_constraints(
+        self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints
+    ):
+        self._oprot.writeMessageBegin("create_table_with_constraints", TMessageType.CALL, self._seqid)
+        args = create_table_with_constraints_args()
+        args.tbl = tbl
+        args.primaryKeys = primaryKeys
+        args.foreignKeys = foreignKeys
+        args.uniqueConstraints = uniqueConstraints
+        args.notNullConstraints = notNullConstraints
+        args.defaultConstraints = defaultConstraints
+        args.checkConstraints = checkConstraints
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_create_table_with_constraints(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = create_table_with_constraints_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        if result.o4 is not None:
+            raise result.o4
+        return
+
+    def create_table_req(self, request):
+        """
+        Parameters:
+         - request
+
+        """
+        self.send_create_table_req(request)
+        self.recv_create_table_req()
+
+    def send_create_table_req(self, request):
+        self._oprot.writeMessageBegin("create_table_req", TMessageType.CALL, self._seqid)
+        args = create_table_req_args()
+        args.request = request
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_create_table_req(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = create_table_req_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        if result.o4 is not None:
+            raise result.o4
+        return
+
+    def drop_constraint(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_drop_constraint(req)
+        self.recv_drop_constraint()
+
+    def send_drop_constraint(self, req):
+        self._oprot.writeMessageBegin("drop_constraint", TMessageType.CALL, self._seqid)
+        args = drop_constraint_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_drop_constraint(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = drop_constraint_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o3 is not None:
+            raise result.o3
+        return
+
+    def add_primary_key(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_add_primary_key(req)
+        self.recv_add_primary_key()
+
+    def send_add_primary_key(self, req):
+        self._oprot.writeMessageBegin("add_primary_key", TMessageType.CALL, self._seqid)
+        args = add_primary_key_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_add_primary_key(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = add_primary_key_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        return
+
+    def add_foreign_key(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_add_foreign_key(req)
+        self.recv_add_foreign_key()
+
+    def send_add_foreign_key(self, req):
+        self._oprot.writeMessageBegin("add_foreign_key", TMessageType.CALL, self._seqid)
+        args = add_foreign_key_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_add_foreign_key(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = add_foreign_key_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        return
+
+    def add_unique_constraint(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_add_unique_constraint(req)
+        self.recv_add_unique_constraint()
+
+    def send_add_unique_constraint(self, req):
+        self._oprot.writeMessageBegin("add_unique_constraint", TMessageType.CALL, self._seqid)
+        args = add_unique_constraint_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_add_unique_constraint(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = add_unique_constraint_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        return
+
+    def add_not_null_constraint(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_add_not_null_constraint(req)
+        self.recv_add_not_null_constraint()
+
+    def send_add_not_null_constraint(self, req):
+        self._oprot.writeMessageBegin("add_not_null_constraint", TMessageType.CALL, self._seqid)
+        args = add_not_null_constraint_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_add_not_null_constraint(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = add_not_null_constraint_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        return
+
+    def add_default_constraint(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_add_default_constraint(req)
+        self.recv_add_default_constraint()
+
+    def send_add_default_constraint(self, req):
+        self._oprot.writeMessageBegin("add_default_constraint", TMessageType.CALL, self._seqid)
+        args = add_default_constraint_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_add_default_constraint(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = add_default_constraint_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        return
+
+    def add_check_constraint(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_add_check_constraint(req)
+        self.recv_add_check_constraint()
+
+    def send_add_check_constraint(self, req):
+        self._oprot.writeMessageBegin("add_check_constraint", TMessageType.CALL, self._seqid)
+        args = add_check_constraint_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_add_check_constraint(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = add_check_constraint_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        return
+
+    def translate_table_dryrun(self, request):
+        """
+        Parameters:
+         - request
+
+        """
+        self.send_translate_table_dryrun(request)
+        return self.recv_translate_table_dryrun()
+
+    def send_translate_table_dryrun(self, request):
+        self._oprot.writeMessageBegin("translate_table_dryrun", TMessageType.CALL, self._seqid)
+        args = translate_table_dryrun_args()
+        args.request = request
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_translate_table_dryrun(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = translate_table_dryrun_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        if result.o4 is not None:
+            raise result.o4
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "translate_table_dryrun failed: unknown result")
+
+    def drop_table(self, dbname, name, deleteData):
+        """
+        Parameters:
+         - dbname
+         - name
+         - deleteData
+
+        """
+        self.send_drop_table(dbname, name, deleteData)
+        self.recv_drop_table()
+
+    def send_drop_table(self, dbname, name, deleteData):
+        self._oprot.writeMessageBegin("drop_table", TMessageType.CALL, self._seqid)
+        args = drop_table_args()
+        args.dbname = dbname
+        args.name = name
+        args.deleteData = deleteData
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_drop_table(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = drop_table_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o3 is not None:
+            raise result.o3
+        return
+
+    def drop_table_with_environment_context(self, dbname, name, deleteData, environment_context):
+        """
+        Parameters:
+         - dbname
+         - name
+         - deleteData
+         - environment_context
+
+        """
+        self.send_drop_table_with_environment_context(dbname, name, deleteData, environment_context)
+        self.recv_drop_table_with_environment_context()
+
+    def send_drop_table_with_environment_context(self, dbname, name, deleteData, environment_context):
+        self._oprot.writeMessageBegin("drop_table_with_environment_context", TMessageType.CALL, self._seqid)
+        args = drop_table_with_environment_context_args()
+        args.dbname = dbname
+        args.name = name
+        args.deleteData = deleteData
+        args.environment_context = environment_context
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_drop_table_with_environment_context(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = drop_table_with_environment_context_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o3 is not None:
+            raise result.o3
+        return
+
+    def truncate_table(self, dbName, tableName, partNames):
+        """
+        Parameters:
+         - dbName
+         - tableName
+         - partNames
+
+        """
+        self.send_truncate_table(dbName, tableName, partNames)
+        self.recv_truncate_table()
+
+    def send_truncate_table(self, dbName, tableName, partNames):
+        self._oprot.writeMessageBegin("truncate_table", TMessageType.CALL, self._seqid)
+        args = truncate_table_args()
+        args.dbName = dbName
+        args.tableName = tableName
+        args.partNames = partNames
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_truncate_table(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = truncate_table_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        return
+
+    def truncate_table_req(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_truncate_table_req(req)
+        return self.recv_truncate_table_req()
+
+    def send_truncate_table_req(self, req):
+        self._oprot.writeMessageBegin("truncate_table_req", TMessageType.CALL, self._seqid)
+        args = truncate_table_req_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_truncate_table_req(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = truncate_table_req_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "truncate_table_req failed: unknown result")
+
+    def get_tables(self, db_name, pattern):
+        """
+        Parameters:
+         - db_name
+         - pattern
+
+        """
+        self.send_get_tables(db_name, pattern)
+        return self.recv_get_tables()
+
+    def send_get_tables(self, db_name, pattern):
+        self._oprot.writeMessageBegin("get_tables", TMessageType.CALL, self._seqid)
+        args = get_tables_args()
+        args.db_name = db_name
+        args.pattern = pattern
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_tables(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_tables_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_tables failed: unknown result")
+
+    def get_tables_by_type(self, db_name, pattern, tableType):
+        """
+        Parameters:
+         - db_name
+         - pattern
+         - tableType
+
+        """
+        self.send_get_tables_by_type(db_name, pattern, tableType)
+        return self.recv_get_tables_by_type()
+
+    def send_get_tables_by_type(self, db_name, pattern, tableType):
+        self._oprot.writeMessageBegin("get_tables_by_type", TMessageType.CALL, self._seqid)
+        args = get_tables_by_type_args()
+        args.db_name = db_name
+        args.pattern = pattern
+        args.tableType = tableType
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_tables_by_type(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_tables_by_type_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_tables_by_type failed: unknown result")
+
+    def get_all_materialized_view_objects_for_rewriting(self):
+        self.send_get_all_materialized_view_objects_for_rewriting()
+        return self.recv_get_all_materialized_view_objects_for_rewriting()
+
+    def send_get_all_materialized_view_objects_for_rewriting(self):
+        self._oprot.writeMessageBegin("get_all_materialized_view_objects_for_rewriting", TMessageType.CALL, self._seqid)
+        args = get_all_materialized_view_objects_for_rewriting_args()
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_all_materialized_view_objects_for_rewriting(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_all_materialized_view_objects_for_rewriting_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        raise TApplicationException(
+            TApplicationException.MISSING_RESULT, "get_all_materialized_view_objects_for_rewriting failed: unknown result"
+        )
+
+    def get_materialized_views_for_rewriting(self, db_name):
+        """
+        Parameters:
+         - db_name
+
+        """
+        self.send_get_materialized_views_for_rewriting(db_name)
+        return self.recv_get_materialized_views_for_rewriting()
+
+    def send_get_materialized_views_for_rewriting(self, db_name):
+        self._oprot.writeMessageBegin("get_materialized_views_for_rewriting", TMessageType.CALL, self._seqid)
+        args = get_materialized_views_for_rewriting_args()
+        args.db_name = db_name
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_materialized_views_for_rewriting(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_materialized_views_for_rewriting_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        raise TApplicationException(
+            TApplicationException.MISSING_RESULT, "get_materialized_views_for_rewriting failed: unknown result"
+        )
+
+    def get_table_meta(self, db_patterns, tbl_patterns, tbl_types):
+        """
+        Parameters:
+         - db_patterns
+         - tbl_patterns
+         - tbl_types
+
+        """
+        self.send_get_table_meta(db_patterns, tbl_patterns, tbl_types)
+        return self.recv_get_table_meta()
+
+    def send_get_table_meta(self, db_patterns, tbl_patterns, tbl_types):
+        self._oprot.writeMessageBegin("get_table_meta", TMessageType.CALL, self._seqid)
+        args = get_table_meta_args()
+        args.db_patterns = db_patterns
+        args.tbl_patterns = tbl_patterns
+        args.tbl_types = tbl_types
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_table_meta(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_table_meta_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_meta failed: unknown result")
+
+    def get_all_tables(self, db_name):
+        """
+        Parameters:
+         - db_name
+
+        """
+        self.send_get_all_tables(db_name)
+        return self.recv_get_all_tables()
+
+    def send_get_all_tables(self, db_name):
+        self._oprot.writeMessageBegin("get_all_tables", TMessageType.CALL, self._seqid)
+        args = get_all_tables_args()
+        args.db_name = db_name
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_all_tables(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_all_tables_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_tables failed: unknown result")
+
+    def get_table(self, dbname, tbl_name):
+        """
+        Parameters:
+         - dbname
+         - tbl_name
+
+        """
+        self.send_get_table(dbname, tbl_name)
+        return self.recv_get_table()
+
+    def send_get_table(self, dbname, tbl_name):
+        self._oprot.writeMessageBegin("get_table", TMessageType.CALL, self._seqid)
+        args = get_table_args()
+        args.dbname = dbname
+        args.tbl_name = tbl_name
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_table(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_table_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table failed: unknown result")
+
+    def get_table_objects_by_name(self, dbname, tbl_names):
+        """
+        Parameters:
+         - dbname
+         - tbl_names
+
+        """
+        self.send_get_table_objects_by_name(dbname, tbl_names)
+        return self.recv_get_table_objects_by_name()
+
+    def send_get_table_objects_by_name(self, dbname, tbl_names):
+        self._oprot.writeMessageBegin("get_table_objects_by_name", TMessageType.CALL, self._seqid)
+        args = get_table_objects_by_name_args()
+        args.dbname = dbname
+        args.tbl_names = tbl_names
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_table_objects_by_name(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_table_objects_by_name_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_objects_by_name failed: unknown result")
+
+    def get_tables_ext(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_get_tables_ext(req)
+        return self.recv_get_tables_ext()
+
+    def send_get_tables_ext(self, req):
+        self._oprot.writeMessageBegin("get_tables_ext", TMessageType.CALL, self._seqid)
+        args = get_tables_ext_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_tables_ext(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_tables_ext_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_tables_ext failed: unknown result")
+
+    def get_table_req(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_get_table_req(req)
+        return self.recv_get_table_req()
+
+    def send_get_table_req(self, req):
+        self._oprot.writeMessageBegin("get_table_req", TMessageType.CALL, self._seqid)
+        args = get_table_req_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_table_req(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_table_req_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_req failed: unknown result")
+
+    def get_table_objects_by_name_req(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_get_table_objects_by_name_req(req)
+        return self.recv_get_table_objects_by_name_req()
+
+    def send_get_table_objects_by_name_req(self, req):
+        self._oprot.writeMessageBegin("get_table_objects_by_name_req", TMessageType.CALL, self._seqid)
+        args = get_table_objects_by_name_req_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_table_objects_by_name_req(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_table_objects_by_name_req_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_objects_by_name_req failed: unknown result")
+
+    def get_materialization_invalidation_info(self, creation_metadata, validTxnList):
+        """
+        Parameters:
+         - creation_metadata
+         - validTxnList
+
+        """
+        self.send_get_materialization_invalidation_info(creation_metadata, validTxnList)
+        return self.recv_get_materialization_invalidation_info()
+
+    def send_get_materialization_invalidation_info(self, creation_metadata, validTxnList):
+        self._oprot.writeMessageBegin("get_materialization_invalidation_info", TMessageType.CALL, self._seqid)
+        args = get_materialization_invalidation_info_args()
+        args.creation_metadata = creation_metadata
+        args.validTxnList = validTxnList
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_materialization_invalidation_info(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_materialization_invalidation_info_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(
+            TApplicationException.MISSING_RESULT, "get_materialization_invalidation_info failed: unknown result"
+        )
+
+    def update_creation_metadata(self, catName, dbname, tbl_name, creation_metadata):
+        """
+        Parameters:
+         - catName
+         - dbname
+         - tbl_name
+         - creation_metadata
+
+        """
+        self.send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata)
+        self.recv_update_creation_metadata()
+
+    def send_update_creation_metadata(self, catName, dbname, tbl_name, creation_metadata):
+        self._oprot.writeMessageBegin("update_creation_metadata", TMessageType.CALL, self._seqid)
+        args = update_creation_metadata_args()
+        args.catName = catName
+        args.dbname = dbname
+        args.tbl_name = tbl_name
+        args.creation_metadata = creation_metadata
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_update_creation_metadata(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = update_creation_metadata_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        return
+
+    def get_table_names_by_filter(self, dbname, filter, max_tables):
+        """
+        Parameters:
+         - dbname
+         - filter
+         - max_tables
+
+        """
+        self.send_get_table_names_by_filter(dbname, filter, max_tables)
+        return self.recv_get_table_names_by_filter()
+
+    def send_get_table_names_by_filter(self, dbname, filter, max_tables):
+        self._oprot.writeMessageBegin("get_table_names_by_filter", TMessageType.CALL, self._seqid)
+        args = get_table_names_by_filter_args()
+        args.dbname = dbname
+        args.filter = filter
+        args.max_tables = max_tables
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_table_names_by_filter(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_table_names_by_filter_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_names_by_filter failed: unknown result")
+
+    def alter_table(self, dbname, tbl_name, new_tbl):
+        """
+        Parameters:
+         - dbname
+         - tbl_name
+         - new_tbl
+
+        """
+        self.send_alter_table(dbname, tbl_name, new_tbl)
+        self.recv_alter_table()
+
+    def send_alter_table(self, dbname, tbl_name, new_tbl):
+        self._oprot.writeMessageBegin("alter_table", TMessageType.CALL, self._seqid)
+        args = alter_table_args()
+        args.dbname = dbname
+        args.tbl_name = tbl_name
+        args.new_tbl = new_tbl
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_alter_table(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = alter_table_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        return
+
+    def alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context):
+        """
+        Parameters:
+         - dbname
+         - tbl_name
+         - new_tbl
+         - environment_context
+
+        """
+        self.send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context)
+        self.recv_alter_table_with_environment_context()
+
+    def send_alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context):
+        self._oprot.writeMessageBegin("alter_table_with_environment_context", TMessageType.CALL, self._seqid)
+        args = alter_table_with_environment_context_args()
+        args.dbname = dbname
+        args.tbl_name = tbl_name
+        args.new_tbl = new_tbl
+        args.environment_context = environment_context
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_alter_table_with_environment_context(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = alter_table_with_environment_context_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        return
+
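+    # NOTE (editorial sketch, not emitted by the Thrift compiler): every RPC in
+    # this generated client follows the send/recv pattern seen above --
+    # send_<name>() serializes a <name>_args struct and flushes the transport,
+    # while recv_<name>() reads the matching <name>_result struct, re-raises
+    # whichever declared exception field (o1, o2, ...) is set, returns
+    # result.success for calls with a return value, and otherwise raises
+    # TApplicationException(MISSING_RESULT) when a non-void call yields no result.
+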
+    def alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade):
+        """
+        Parameters:
+         - dbname
+         - tbl_name
+         - new_tbl
+         - cascade
+
+        """
+        self.send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade)
+        self.recv_alter_table_with_cascade()
+
+    def send_alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade):
+        self._oprot.writeMessageBegin("alter_table_with_cascade", TMessageType.CALL, self._seqid)
+        args = alter_table_with_cascade_args()
+        args.dbname = dbname
+        args.tbl_name = tbl_name
+        args.new_tbl = new_tbl
+        args.cascade = cascade
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_alter_table_with_cascade(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = alter_table_with_cascade_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        return
+
+    def alter_table_req(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_alter_table_req(req)
+        return self.recv_alter_table_req()
+
+    def send_alter_table_req(self, req):
+        self._oprot.writeMessageBegin("alter_table_req", TMessageType.CALL, self._seqid)
+        args = alter_table_req_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_alter_table_req(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = alter_table_req_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "alter_table_req failed: unknown result")
+
+    def add_partition(self, new_part):
+        """
+        Parameters:
+         - new_part
+
+        """
+        self.send_add_partition(new_part)
+        return self.recv_add_partition()
+
+    def send_add_partition(self, new_part):
+        self._oprot.writeMessageBegin("add_partition", TMessageType.CALL, self._seqid)
+        args = add_partition_args()
+        args.new_part = new_part
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_add_partition(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = add_partition_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "add_partition failed: unknown result")
+
+    def add_partition_with_environment_context(self, new_part, environment_context):
+        """
+        Parameters:
+         - new_part
+         - environment_context
+
+        """
+        self.send_add_partition_with_environment_context(new_part, environment_context)
+        return self.recv_add_partition_with_environment_context()
+
+    def send_add_partition_with_environment_context(self, new_part, environment_context):
+        self._oprot.writeMessageBegin("add_partition_with_environment_context", TMessageType.CALL, self._seqid)
+        args = add_partition_with_environment_context_args()
+        args.new_part = new_part
+        args.environment_context = environment_context
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_add_partition_with_environment_context(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = add_partition_with_environment_context_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(
+            TApplicationException.MISSING_RESULT, "add_partition_with_environment_context failed: unknown result"
+        )
+
+    def add_partitions(self, new_parts):
+        """
+        Parameters:
+         - new_parts
+
+        """
+        self.send_add_partitions(new_parts)
+        return self.recv_add_partitions()
+
+    def send_add_partitions(self, new_parts):
+        self._oprot.writeMessageBegin("add_partitions", TMessageType.CALL, self._seqid)
+        args = add_partitions_args()
+        args.new_parts = new_parts
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_add_partitions(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = add_partitions_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "add_partitions failed: unknown result")
+
+    def add_partitions_pspec(self, new_parts):
+        """
+        Parameters:
+         - new_parts
+
+        """
+        self.send_add_partitions_pspec(new_parts)
+        return self.recv_add_partitions_pspec()
+
+    def send_add_partitions_pspec(self, new_parts):
+        self._oprot.writeMessageBegin("add_partitions_pspec", TMessageType.CALL, self._seqid)
+        args = add_partitions_pspec_args()
+        args.new_parts = new_parts
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_add_partitions_pspec(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = add_partitions_pspec_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "add_partitions_pspec failed: unknown result")
+
+    def append_partition(self, db_name, tbl_name, part_vals):
+        """
+        Parameters:
+         - db_name
+         - tbl_name
+         - part_vals
+
+        """
+        self.send_append_partition(db_name, tbl_name, part_vals)
+        return self.recv_append_partition()
+
+    def send_append_partition(self, db_name, tbl_name, part_vals):
+        self._oprot.writeMessageBegin("append_partition", TMessageType.CALL, self._seqid)
+        args = append_partition_args()
+        args.db_name = db_name
+        args.tbl_name = tbl_name
+        args.part_vals = part_vals
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_append_partition(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = append_partition_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "append_partition failed: unknown result")
+
+    def add_partitions_req(self, request):
+        """
+        Parameters:
+         - request
+
+        """
+        self.send_add_partitions_req(request)
+        return self.recv_add_partitions_req()
+
+    def send_add_partitions_req(self, request):
+        self._oprot.writeMessageBegin("add_partitions_req", TMessageType.CALL, self._seqid)
+        args = add_partitions_req_args()
+        args.request = request
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_add_partitions_req(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = add_partitions_req_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "add_partitions_req failed: unknown result")
+
+    def append_partition_with_environment_context(self, db_name, tbl_name, part_vals, environment_context):
+        """
+        Parameters:
+         - db_name
+         - tbl_name
+         - part_vals
+         - environment_context
+
+        """
+        self.send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context)
+        return self.recv_append_partition_with_environment_context()
+
+    def send_append_partition_with_environment_context(self, db_name, tbl_name, part_vals, environment_context):
+        self._oprot.writeMessageBegin("append_partition_with_environment_context", TMessageType.CALL, self._seqid)
+        args = append_partition_with_environment_context_args()
+        args.db_name = db_name
+        args.tbl_name = tbl_name
+        args.part_vals = part_vals
+        args.environment_context = environment_context
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_append_partition_with_environment_context(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = append_partition_with_environment_context_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(
+            TApplicationException.MISSING_RESULT, "append_partition_with_environment_context failed: unknown result"
+        )
+
+    def append_partition_by_name(self, db_name, tbl_name, part_name):
+        """
+        Parameters:
+         - db_name
+         - tbl_name
+         - part_name
+
+        """
+        self.send_append_partition_by_name(db_name, tbl_name, part_name)
+        return self.recv_append_partition_by_name()
+
+    def send_append_partition_by_name(self, db_name, tbl_name, part_name):
+        self._oprot.writeMessageBegin("append_partition_by_name", TMessageType.CALL, self._seqid)
+        args = append_partition_by_name_args()
+        args.db_name = db_name
+        args.tbl_name = tbl_name
+        args.part_name = part_name
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_append_partition_by_name(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = append_partition_by_name_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "append_partition_by_name failed: unknown result")
+
+    def append_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, environment_context):
+        """
+        Parameters:
+         - db_name
+         - tbl_name
+         - part_name
+         - environment_context
+
+        """
+        self.send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context)
+        return self.recv_append_partition_by_name_with_environment_context()
+
+    def send_append_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, environment_context):
+        self._oprot.writeMessageBegin("append_partition_by_name_with_environment_context", TMessageType.CALL, self._seqid)
+        args = append_partition_by_name_with_environment_context_args()
+        args.db_name = db_name
+        args.tbl_name = tbl_name
+        args.part_name = part_name
+        args.environment_context = environment_context
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_append_partition_by_name_with_environment_context(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = append_partition_by_name_with_environment_context_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        raise TApplicationException(
+            TApplicationException.MISSING_RESULT, "append_partition_by_name_with_environment_context failed: unknown result"
+        )
+
+    def drop_partition(self, db_name, tbl_name, part_vals, deleteData):
+        """
+        Parameters:
+         - db_name
+         - tbl_name
+         - part_vals
+         - deleteData
+
+        """
+        self.send_drop_partition(db_name, tbl_name, part_vals, deleteData)
+        return self.recv_drop_partition()
+
+    def send_drop_partition(self, db_name, tbl_name, part_vals, deleteData):
+        self._oprot.writeMessageBegin("drop_partition", TMessageType.CALL, self._seqid)
+        args = drop_partition_args()
+        args.db_name = db_name
+        args.tbl_name = tbl_name
+        args.part_vals = part_vals
+        args.deleteData = deleteData
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_drop_partition(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = drop_partition_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_partition failed: unknown result")
+
+    def drop_partition_with_environment_context(self, db_name, tbl_name, part_vals, deleteData, environment_context):
+        """
+        Parameters:
+         - db_name
+         - tbl_name
+         - part_vals
+         - deleteData
+         - environment_context
+
+        """
+        self.send_drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, environment_context)
+        return self.recv_drop_partition_with_environment_context()
+
+    def send_drop_partition_with_environment_context(self, db_name, tbl_name, part_vals, deleteData, environment_context):
+        self._oprot.writeMessageBegin("drop_partition_with_environment_context", TMessageType.CALL, self._seqid)
+        args = drop_partition_with_environment_context_args()
+        args.db_name = db_name
+        args.tbl_name = tbl_name
+        args.part_vals = part_vals
+        args.deleteData = deleteData
+        args.environment_context = environment_context
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_drop_partition_with_environment_context(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = drop_partition_with_environment_context_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        raise TApplicationException(
+            TApplicationException.MISSING_RESULT, "drop_partition_with_environment_context failed: unknown result"
+        )
+
+    def drop_partition_by_name(self, db_name, tbl_name, part_name, deleteData):
+        """
+        Parameters:
+         - db_name
+         - tbl_name
+         - part_name
+         - deleteData
+
+        """
+        self.send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData)
+        return self.recv_drop_partition_by_name()
+
+    def send_drop_partition_by_name(self, db_name, tbl_name, part_name, deleteData):
+        self._oprot.writeMessageBegin("drop_partition_by_name", TMessageType.CALL, self._seqid)
+        args = drop_partition_by_name_args()
+        args.db_name = db_name
+        args.tbl_name = tbl_name
+        args.part_name = part_name
+        args.deleteData = deleteData
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_drop_partition_by_name(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = drop_partition_by_name_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_partition_by_name failed: unknown result")
+
+    def drop_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, deleteData, environment_context):
+        """
+        Parameters:
+         - db_name
+         - tbl_name
+         - part_name
+         - deleteData
+         - environment_context
+
+        """
+        self.send_drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, environment_context)
+        return self.recv_drop_partition_by_name_with_environment_context()
+
+    def send_drop_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, deleteData, environment_context):
+        self._oprot.writeMessageBegin("drop_partition_by_name_with_environment_context", TMessageType.CALL, self._seqid)
+        args = drop_partition_by_name_with_environment_context_args()
+        args.db_name = db_name
+        args.tbl_name = tbl_name
+        args.part_name = part_name
+        args.deleteData = deleteData
+        args.environment_context = environment_context
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_drop_partition_by_name_with_environment_context(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = drop_partition_by_name_with_environment_context_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        raise TApplicationException(
+            TApplicationException.MISSING_RESULT, "drop_partition_by_name_with_environment_context failed: unknown result"
+        )
+
+    def drop_partitions_req(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_drop_partitions_req(req)
+        return self.recv_drop_partitions_req()
+
+    def send_drop_partitions_req(self, req):
+        self._oprot.writeMessageBegin("drop_partitions_req", TMessageType.CALL, self._seqid)
+        args = drop_partitions_req_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_drop_partitions_req(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = drop_partitions_req_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_partitions_req failed: unknown result")
+
+    def get_partition(self, db_name, tbl_name, part_vals):
+        """
+        Parameters:
+         - db_name
+         - tbl_name
+         - part_vals
+
+        """
+        self.send_get_partition(db_name, tbl_name, part_vals)
+        return self.recv_get_partition()
+
+    def send_get_partition(self, db_name, tbl_name, part_vals):
+        self._oprot.writeMessageBegin("get_partition", TMessageType.CALL, self._seqid)
+        args = get_partition_args()
+        args.db_name = db_name
+        args.tbl_name = tbl_name
+        args.part_vals = part_vals
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_partition(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_partition_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition failed: unknown result")
+
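+    # Usage sketch (editorial): a caller pairs this generated Client with a
+    # Thrift transport/protocol. The host and port below are assumptions --
+    # 9083 is the conventional Hive Metastore port, not something this module
+    # configures.
+    #
+    #   from thrift.protocol import TBinaryProtocol
+    #   from thrift.transport import TSocket, TTransport
+    #
+    #   transport = TTransport.TBufferedTransport(TSocket.TSocket("localhost", 9083))
+    #   client = Client(TBinaryProtocol.TBinaryProtocol(transport))
+    #   transport.open()
+    #   part = client.get_partition("some_db", "some_tbl", ["2023-01-01"])
+    #   transport.close()
+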
+    def get_partition_req(self, req):
+        """
+        Parameters:
+         - req
+
+        """
+        self.send_get_partition_req(req)
+        return self.recv_get_partition_req()
+
+    def send_get_partition_req(self, req):
+        self._oprot.writeMessageBegin("get_partition_req", TMessageType.CALL, self._seqid)
+        args = get_partition_req_args()
+        args.req = req
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_get_partition_req(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = get_partition_req_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_req failed: unknown result")
+
+    def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+        """
+        Parameters:
+         - partitionSpecs
+         - source_db
+         - source_table_name
+         - dest_db
+         - dest_table_name
+
+        """
+        self.send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+        return self.recv_exchange_partition()
+
+    def send_exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+        self._oprot.writeMessageBegin("exchange_partition", TMessageType.CALL, self._seqid)
+        args = exchange_partition_args()
+        args.partitionSpecs = partitionSpecs
+        args.source_db = source_db
+        args.source_table_name = source_table_name
+        args.dest_db = dest_db
+        args.dest_table_name = dest_table_name
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_exchange_partition(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
+            x.read(iprot)
+            iprot.readMessageEnd()
+            raise x
+        result = exchange_partition_result()
+        result.read(iprot)
+        iprot.readMessageEnd()
+        if result.success is not None:
+            return result.success
+        if result.o1 is not None:
+            raise result.o1
+        if result.o2 is not None:
+            raise result.o2
+        if result.o3 is not None:
+            raise result.o3
+        if result.o4 is not None:
+            raise result.o4
+        raise TApplicationException(TApplicationException.MISSING_RESULT, "exchange_partition failed: unknown result")
+
+    def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+        """
+        Parameters:
+         - partitionSpecs
+         - source_db
+         - source_table_name
+         - dest_db
+         - dest_table_name
+
+        """
+        self.send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+        return self.recv_exchange_partitions()
+
+    def send_exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+        self._oprot.writeMessageBegin("exchange_partitions", TMessageType.CALL, self._seqid)
+        args = exchange_partitions_args()
+        args.partitionSpecs = partitionSpecs
+        args.source_db = source_db
+        args.source_table_name = source_table_name
+        args.dest_db = dest_db
+        args.dest_table_name = dest_table_name
+        args.write(self._oprot)
+        self._oprot.writeMessageEnd()
+        self._oprot.trans.flush()
+
+    def recv_exchange_partitions(self):
+        iprot = self._iprot
+        (fname, mtype, rseqid) = iprot.readMessageBegin()
+        if mtype == TMessageType.EXCEPTION:
+            x = TApplicationException()
TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = exchange_partitions_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "exchange_partitions failed: unknown result") + + def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - user_name + - group_names + + """ + self.send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names) + return self.recv_get_partition_with_auth() + + def send_get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names): + self._oprot.writeMessageBegin("get_partition_with_auth", TMessageType.CALL, self._seqid) + args = get_partition_with_auth_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.part_vals = part_vals + args.user_name = user_name + args.group_names = group_names + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partition_with_auth(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partition_with_auth_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_with_auth failed: unknown result") + + def get_partition_by_name(self, db_name, tbl_name, part_name): + """ + Parameters: + - db_name + - tbl_name + - part_name + + """ + self.send_get_partition_by_name(db_name, tbl_name, part_name) + return self.recv_get_partition_by_name() + + def send_get_partition_by_name(self, db_name, tbl_name, part_name): + self._oprot.writeMessageBegin("get_partition_by_name", TMessageType.CALL, self._seqid) + args = get_partition_by_name_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.part_name = part_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partition_by_name(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partition_by_name_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_by_name failed: unknown result") + + def get_partitions(self, db_name, tbl_name, max_parts): + """ + Parameters: + - db_name + - tbl_name + - max_parts + + """ + self.send_get_partitions(db_name, tbl_name, max_parts) + return self.recv_get_partitions() + + def send_get_partitions(self, db_name, tbl_name, max_parts): + self._oprot.writeMessageBegin("get_partitions", TMessageType.CALL, self._seqid) + args = get_partitions_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.max_parts 
= max_parts + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions failed: unknown result") + + def get_partitions_req(self, req): + """ + Parameters: + - req + + """ + self.send_get_partitions_req(req) + return self.recv_get_partitions_req() + + def send_get_partitions_req(self, req): + self._oprot.writeMessageBegin("get_partitions_req", TMessageType.CALL, self._seqid) + args = get_partitions_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_req failed: unknown result") + + def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names): + """ + Parameters: + - db_name + - tbl_name + - max_parts + - user_name + - group_names + + """ + self.send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names) + return self.recv_get_partitions_with_auth() + + def send_get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names): + self._oprot.writeMessageBegin("get_partitions_with_auth", TMessageType.CALL, self._seqid) + args = get_partitions_with_auth_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.max_parts = max_parts + args.user_name = user_name + args.group_names = group_names + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_with_auth(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_with_auth_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_with_auth failed: unknown result") + + def get_partitions_pspec(self, db_name, tbl_name, max_parts): + """ + Parameters: + - db_name + - tbl_name + - max_parts + + """ + self.send_get_partitions_pspec(db_name, tbl_name, max_parts) + return self.recv_get_partitions_pspec() + + def send_get_partitions_pspec(self, db_name, tbl_name, max_parts): + self._oprot.writeMessageBegin("get_partitions_pspec", TMessageType.CALL, self._seqid) + args = get_partitions_pspec_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.max_parts = 
max_parts + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_pspec(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_pspec_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_pspec failed: unknown result") + + def get_partition_names(self, db_name, tbl_name, max_parts): + """ + Parameters: + - db_name + - tbl_name + - max_parts + + """ + self.send_get_partition_names(db_name, tbl_name, max_parts) + return self.recv_get_partition_names() + + def send_get_partition_names(self, db_name, tbl_name, max_parts): + self._oprot.writeMessageBegin("get_partition_names", TMessageType.CALL, self._seqid) + args = get_partition_names_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.max_parts = max_parts + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partition_names(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partition_names_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names failed: unknown result") + + def get_partition_values(self, request): + """ + Parameters: + - request + + """ + self.send_get_partition_values(request) + return self.recv_get_partition_values() + + def send_get_partition_values(self, request): + self._oprot.writeMessageBegin("get_partition_values", TMessageType.CALL, self._seqid) + args = get_partition_values_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partition_values(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partition_values_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_values failed: unknown result") + + def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - max_parts + + """ + self.send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts) + return self.recv_get_partitions_ps() + + def send_get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): + self._oprot.writeMessageBegin("get_partitions_ps", TMessageType.CALL, self._seqid) + args = get_partitions_ps_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.part_vals = part_vals + args.max_parts = max_parts + args.write(self._oprot) + self._oprot.writeMessageEnd() + 
self._oprot.trans.flush() + + def recv_get_partitions_ps(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_ps_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_ps failed: unknown result") + + def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - max_parts + - user_name + - group_names + + """ + self.send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names) + return self.recv_get_partitions_ps_with_auth() + + def send_get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names): + self._oprot.writeMessageBegin("get_partitions_ps_with_auth", TMessageType.CALL, self._seqid) + args = get_partitions_ps_with_auth_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.part_vals = part_vals + args.max_parts = max_parts + args.user_name = user_name + args.group_names = group_names + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_ps_with_auth(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_ps_with_auth_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_ps_with_auth failed: unknown result") + + def get_partitions_ps_with_auth_req(self, req): + """ + Parameters: + - req + + """ + self.send_get_partitions_ps_with_auth_req(req) + return self.recv_get_partitions_ps_with_auth_req() + + def send_get_partitions_ps_with_auth_req(self, req): + self._oprot.writeMessageBegin("get_partitions_ps_with_auth_req", TMessageType.CALL, self._seqid) + args = get_partitions_ps_with_auth_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_ps_with_auth_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_ps_with_auth_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException( + TApplicationException.MISSING_RESULT, "get_partitions_ps_with_auth_req failed: unknown result" + ) + + def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - max_parts + + """ + self.send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts) + return self.recv_get_partition_names_ps() + + def send_get_partition_names_ps(self, db_name, tbl_name, 
part_vals, max_parts): + self._oprot.writeMessageBegin("get_partition_names_ps", TMessageType.CALL, self._seqid) + args = get_partition_names_ps_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.part_vals = part_vals + args.max_parts = max_parts + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partition_names_ps(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partition_names_ps_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_ps failed: unknown result") + + def get_partition_names_ps_req(self, req): + """ + Parameters: + - req + + """ + self.send_get_partition_names_ps_req(req) + return self.recv_get_partition_names_ps_req() + + def send_get_partition_names_ps_req(self, req): + self._oprot.writeMessageBegin("get_partition_names_ps_req", TMessageType.CALL, self._seqid) + args = get_partition_names_ps_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partition_names_ps_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partition_names_ps_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_ps_req failed: unknown result") + + def get_partition_names_req(self, req): + """ + Parameters: + - req + + """ + self.send_get_partition_names_req(req) + return self.recv_get_partition_names_req() + + def send_get_partition_names_req(self, req): + self._oprot.writeMessageBegin("get_partition_names_req", TMessageType.CALL, self._seqid) + args = get_partition_names_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partition_names_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partition_names_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_req failed: unknown result") + + def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts): + """ + Parameters: + - db_name + - tbl_name + - filter + - max_parts + + """ + self.send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts) + return self.recv_get_partitions_by_filter() + + def send_get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts): + self._oprot.writeMessageBegin("get_partitions_by_filter", TMessageType.CALL, self._seqid) + args = get_partitions_by_filter_args() + 
args.db_name = db_name + args.tbl_name = tbl_name + args.filter = filter + args.max_parts = max_parts + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_by_filter(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_by_filter_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_filter failed: unknown result") + + def get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts): + """ + Parameters: + - db_name + - tbl_name + - filter + - max_parts + + """ + self.send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts) + return self.recv_get_part_specs_by_filter() + + def send_get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts): + self._oprot.writeMessageBegin("get_part_specs_by_filter", TMessageType.CALL, self._seqid) + args = get_part_specs_by_filter_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.filter = filter + args.max_parts = max_parts + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_part_specs_by_filter(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_part_specs_by_filter_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_part_specs_by_filter failed: unknown result") + + def get_partitions_by_expr(self, req): + """ + Parameters: + - req + + """ + self.send_get_partitions_by_expr(req) + return self.recv_get_partitions_by_expr() + + def send_get_partitions_by_expr(self, req): + self._oprot.writeMessageBegin("get_partitions_by_expr", TMessageType.CALL, self._seqid) + args = get_partitions_by_expr_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_by_expr(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_by_expr_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_expr failed: unknown result") + + def get_partitions_spec_by_expr(self, req): + """ + Parameters: + - req + + """ + self.send_get_partitions_spec_by_expr(req) + return self.recv_get_partitions_spec_by_expr() + + def send_get_partitions_spec_by_expr(self, req): + self._oprot.writeMessageBegin("get_partitions_spec_by_expr", TMessageType.CALL, self._seqid) + args = get_partitions_spec_by_expr_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + 
self._oprot.trans.flush() + + def recv_get_partitions_spec_by_expr(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_spec_by_expr_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_spec_by_expr failed: unknown result") + + def get_num_partitions_by_filter(self, db_name, tbl_name, filter): + """ + Parameters: + - db_name + - tbl_name + - filter + + """ + self.send_get_num_partitions_by_filter(db_name, tbl_name, filter) + return self.recv_get_num_partitions_by_filter() + + def send_get_num_partitions_by_filter(self, db_name, tbl_name, filter): + self._oprot.writeMessageBegin("get_num_partitions_by_filter", TMessageType.CALL, self._seqid) + args = get_num_partitions_by_filter_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.filter = filter + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_num_partitions_by_filter(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_num_partitions_by_filter_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result") + + def get_partitions_by_names(self, db_name, tbl_name, names): + """ + Parameters: + - db_name + - tbl_name + - names + + """ + self.send_get_partitions_by_names(db_name, tbl_name, names) + return self.recv_get_partitions_by_names() + + def send_get_partitions_by_names(self, db_name, tbl_name, names): + self._oprot.writeMessageBegin("get_partitions_by_names", TMessageType.CALL, self._seqid) + args = get_partitions_by_names_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.names = names + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_by_names(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_by_names_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_names failed: unknown result") + + def get_partitions_by_names_req(self, req): + """ + Parameters: + - req + + """ + self.send_get_partitions_by_names_req(req) + return self.recv_get_partitions_by_names_req() + + def send_get_partitions_by_names_req(self, req): + self._oprot.writeMessageBegin("get_partitions_by_names_req", TMessageType.CALL, self._seqid) + args = get_partitions_by_names_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def 
recv_get_partitions_by_names_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_by_names_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_names_req failed: unknown result") + + def alter_partition(self, db_name, tbl_name, new_part): + """ + Parameters: + - db_name + - tbl_name + - new_part + + """ + self.send_alter_partition(db_name, tbl_name, new_part) + self.recv_alter_partition() + + def send_alter_partition(self, db_name, tbl_name, new_part): + self._oprot.writeMessageBegin("alter_partition", TMessageType.CALL, self._seqid) + args = alter_partition_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.new_part = new_part + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_partition(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_partition_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def alter_partitions(self, db_name, tbl_name, new_parts): + """ + Parameters: + - db_name + - tbl_name + - new_parts + + """ + self.send_alter_partitions(db_name, tbl_name, new_parts) + self.recv_alter_partitions() + + def send_alter_partitions(self, db_name, tbl_name, new_parts): + self._oprot.writeMessageBegin("alter_partitions", TMessageType.CALL, self._seqid) + args = alter_partitions_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.new_parts = new_parts + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_partitions(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_partitions_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context): + """ + Parameters: + - db_name + - tbl_name + - new_parts + - environment_context + + """ + self.send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context) + self.recv_alter_partitions_with_environment_context() + + def send_alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context): + self._oprot.writeMessageBegin("alter_partitions_with_environment_context", TMessageType.CALL, self._seqid) + args = alter_partitions_with_environment_context_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.new_parts = new_parts + args.environment_context = environment_context + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_partitions_with_environment_context(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype 
== TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_partitions_with_environment_context_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def alter_partitions_req(self, req): + """ + Parameters: + - req + + """ + self.send_alter_partitions_req(req) + return self.recv_alter_partitions_req() + + def send_alter_partitions_req(self, req): + self._oprot.writeMessageBegin("alter_partitions_req", TMessageType.CALL, self._seqid) + args = alter_partitions_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_partitions_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_partitions_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "alter_partitions_req failed: unknown result") + + def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context): + """ + Parameters: + - db_name + - tbl_name + - new_part + - environment_context + + """ + self.send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context) + self.recv_alter_partition_with_environment_context() + + def send_alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context): + self._oprot.writeMessageBegin("alter_partition_with_environment_context", TMessageType.CALL, self._seqid) + args = alter_partition_with_environment_context_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.new_part = new_part + args.environment_context = environment_context + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_partition_with_environment_context(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_partition_with_environment_context_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def rename_partition(self, db_name, tbl_name, part_vals, new_part): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - new_part + + """ + self.send_rename_partition(db_name, tbl_name, part_vals, new_part) + self.recv_rename_partition() + + def send_rename_partition(self, db_name, tbl_name, part_vals, new_part): + self._oprot.writeMessageBegin("rename_partition", TMessageType.CALL, self._seqid) + args = rename_partition_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.part_vals = part_vals + args.new_part = new_part + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_rename_partition(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = rename_partition_result() + 
result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def rename_partition_req(self, req): + """ + Parameters: + - req + + """ + self.send_rename_partition_req(req) + return self.recv_rename_partition_req() + + def send_rename_partition_req(self, req): + self._oprot.writeMessageBegin("rename_partition_req", TMessageType.CALL, self._seqid) + args = rename_partition_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_rename_partition_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = rename_partition_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "rename_partition_req failed: unknown result") + + def partition_name_has_valid_characters(self, part_vals, throw_exception): + """ + Parameters: + - part_vals + - throw_exception + + """ + self.send_partition_name_has_valid_characters(part_vals, throw_exception) + return self.recv_partition_name_has_valid_characters() + + def send_partition_name_has_valid_characters(self, part_vals, throw_exception): + self._oprot.writeMessageBegin("partition_name_has_valid_characters", TMessageType.CALL, self._seqid) + args = partition_name_has_valid_characters_args() + args.part_vals = part_vals + args.throw_exception = throw_exception + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_partition_name_has_valid_characters(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = partition_name_has_valid_characters_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException( + TApplicationException.MISSING_RESULT, "partition_name_has_valid_characters failed: unknown result" + ) + + def get_config_value(self, name, defaultValue): + """ + Parameters: + - name + - defaultValue + + """ + self.send_get_config_value(name, defaultValue) + return self.recv_get_config_value() + + def send_get_config_value(self, name, defaultValue): + self._oprot.writeMessageBegin("get_config_value", TMessageType.CALL, self._seqid) + args = get_config_value_args() + args.name = name + args.defaultValue = defaultValue + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_config_value(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_config_value_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_config_value failed: unknown result") + + def partition_name_to_vals(self, part_name): + """ + Parameters: + - part_name + + """ + 
self.send_partition_name_to_vals(part_name) + return self.recv_partition_name_to_vals() + + def send_partition_name_to_vals(self, part_name): + self._oprot.writeMessageBegin("partition_name_to_vals", TMessageType.CALL, self._seqid) + args = partition_name_to_vals_args() + args.part_name = part_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_partition_name_to_vals(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = partition_name_to_vals_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "partition_name_to_vals failed: unknown result") + + def partition_name_to_spec(self, part_name): + """ + Parameters: + - part_name + + """ + self.send_partition_name_to_spec(part_name) + return self.recv_partition_name_to_spec() + + def send_partition_name_to_spec(self, part_name): + self._oprot.writeMessageBegin("partition_name_to_spec", TMessageType.CALL, self._seqid) + args = partition_name_to_spec_args() + args.part_name = part_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_partition_name_to_spec(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = partition_name_to_spec_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "partition_name_to_spec failed: unknown result") + + def markPartitionForEvent(self, db_name, tbl_name, part_vals, eventType): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - eventType + + """ + self.send_markPartitionForEvent(db_name, tbl_name, part_vals, eventType) + self.recv_markPartitionForEvent() + + def send_markPartitionForEvent(self, db_name, tbl_name, part_vals, eventType): + self._oprot.writeMessageBegin("markPartitionForEvent", TMessageType.CALL, self._seqid) + args = markPartitionForEvent_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.part_vals = part_vals + args.eventType = eventType + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_markPartitionForEvent(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = markPartitionForEvent_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + if result.o5 is not None: + raise result.o5 + if result.o6 is not None: + raise result.o6 + return + + def isPartitionMarkedForEvent(self, db_name, tbl_name, part_vals, eventType): + """ + Parameters: + - db_name + - tbl_name + - part_vals + - eventType + + """ + self.send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType) + return self.recv_isPartitionMarkedForEvent() + + def 
send_isPartitionMarkedForEvent(self, db_name, tbl_name, part_vals, eventType): + self._oprot.writeMessageBegin("isPartitionMarkedForEvent", TMessageType.CALL, self._seqid) + args = isPartitionMarkedForEvent_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.part_vals = part_vals + args.eventType = eventType + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_isPartitionMarkedForEvent(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = isPartitionMarkedForEvent_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + if result.o5 is not None: + raise result.o5 + if result.o6 is not None: + raise result.o6 + raise TApplicationException(TApplicationException.MISSING_RESULT, "isPartitionMarkedForEvent failed: unknown result") + + def get_primary_keys(self, request): + """ + Parameters: + - request + + """ + self.send_get_primary_keys(request) + return self.recv_get_primary_keys() + + def send_get_primary_keys(self, request): + self._oprot.writeMessageBegin("get_primary_keys", TMessageType.CALL, self._seqid) + args = get_primary_keys_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_primary_keys(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_primary_keys_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_primary_keys failed: unknown result") + + def get_foreign_keys(self, request): + """ + Parameters: + - request + + """ + self.send_get_foreign_keys(request) + return self.recv_get_foreign_keys() + + def send_get_foreign_keys(self, request): + self._oprot.writeMessageBegin("get_foreign_keys", TMessageType.CALL, self._seqid) + args = get_foreign_keys_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_foreign_keys(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_foreign_keys_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_foreign_keys failed: unknown result") + + def get_unique_constraints(self, request): + """ + Parameters: + - request + + """ + self.send_get_unique_constraints(request) + return self.recv_get_unique_constraints() + + def send_get_unique_constraints(self, request): + self._oprot.writeMessageBegin("get_unique_constraints", TMessageType.CALL, self._seqid) + args = 
get_unique_constraints_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_unique_constraints(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_unique_constraints_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_unique_constraints failed: unknown result") + + def get_not_null_constraints(self, request): + """ + Parameters: + - request + + """ + self.send_get_not_null_constraints(request) + return self.recv_get_not_null_constraints() + + def send_get_not_null_constraints(self, request): + self._oprot.writeMessageBegin("get_not_null_constraints", TMessageType.CALL, self._seqid) + args = get_not_null_constraints_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_not_null_constraints(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_not_null_constraints_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_not_null_constraints failed: unknown result") + + def get_default_constraints(self, request): + """ + Parameters: + - request + + """ + self.send_get_default_constraints(request) + return self.recv_get_default_constraints() + + def send_get_default_constraints(self, request): + self._oprot.writeMessageBegin("get_default_constraints", TMessageType.CALL, self._seqid) + args = get_default_constraints_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_default_constraints(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_default_constraints_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_default_constraints failed: unknown result") + + def get_check_constraints(self, request): + """ + Parameters: + - request + + """ + self.send_get_check_constraints(request) + return self.recv_get_check_constraints() + + def send_get_check_constraints(self, request): + self._oprot.writeMessageBegin("get_check_constraints", TMessageType.CALL, self._seqid) + args = get_check_constraints_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_check_constraints(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + 
iprot.readMessageEnd() + raise x + result = get_check_constraints_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_check_constraints failed: unknown result") + + def get_all_table_constraints(self, request): + """ + Parameters: + - request + + """ + self.send_get_all_table_constraints(request) + return self.recv_get_all_table_constraints() + + def send_get_all_table_constraints(self, request): + self._oprot.writeMessageBegin("get_all_table_constraints", TMessageType.CALL, self._seqid) + args = get_all_table_constraints_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_all_table_constraints(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_all_table_constraints_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_table_constraints failed: unknown result") + + def update_table_column_statistics(self, stats_obj): + """ + Parameters: + - stats_obj + + """ + self.send_update_table_column_statistics(stats_obj) + return self.recv_update_table_column_statistics() + + def send_update_table_column_statistics(self, stats_obj): + self._oprot.writeMessageBegin("update_table_column_statistics", TMessageType.CALL, self._seqid) + args = update_table_column_statistics_args() + args.stats_obj = stats_obj + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_update_table_column_statistics(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = update_table_column_statistics_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "update_table_column_statistics failed: unknown result") + + def update_partition_column_statistics(self, stats_obj): + """ + Parameters: + - stats_obj + + """ + self.send_update_partition_column_statistics(stats_obj) + return self.recv_update_partition_column_statistics() + + def send_update_partition_column_statistics(self, stats_obj): + self._oprot.writeMessageBegin("update_partition_column_statistics", TMessageType.CALL, self._seqid) + args = update_partition_column_statistics_args() + args.stats_obj = stats_obj + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_update_partition_column_statistics(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = 
update_partition_column_statistics_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException( + TApplicationException.MISSING_RESULT, "update_partition_column_statistics failed: unknown result" + ) + + def update_table_column_statistics_req(self, req): + """ + Parameters: + - req + + """ + self.send_update_table_column_statistics_req(req) + return self.recv_update_table_column_statistics_req() + + def send_update_table_column_statistics_req(self, req): + self._oprot.writeMessageBegin("update_table_column_statistics_req", TMessageType.CALL, self._seqid) + args = update_table_column_statistics_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_update_table_column_statistics_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = update_table_column_statistics_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException( + TApplicationException.MISSING_RESULT, "update_table_column_statistics_req failed: unknown result" + ) + + def update_partition_column_statistics_req(self, req): + """ + Parameters: + - req + + """ + self.send_update_partition_column_statistics_req(req) + return self.recv_update_partition_column_statistics_req() + + def send_update_partition_column_statistics_req(self, req): + self._oprot.writeMessageBegin("update_partition_column_statistics_req", TMessageType.CALL, self._seqid) + args = update_partition_column_statistics_req_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_update_partition_column_statistics_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = update_partition_column_statistics_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException( + TApplicationException.MISSING_RESULT, "update_partition_column_statistics_req failed: unknown result" + ) + + def update_transaction_statistics(self, req): + """ + Parameters: + - req + + """ + self.send_update_transaction_statistics(req) + self.recv_update_transaction_statistics() + + def send_update_transaction_statistics(self, req): + self._oprot.writeMessageBegin("update_transaction_statistics", TMessageType.CALL, self._seqid) + args = update_transaction_statistics_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_update_transaction_statistics(self): + iprot = self._iprot + (fname, mtype, 
rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = update_transaction_statistics_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def get_table_column_statistics(self, db_name, tbl_name, col_name): + """ + Parameters: + - db_name + - tbl_name + - col_name + + """ + self.send_get_table_column_statistics(db_name, tbl_name, col_name) + return self.recv_get_table_column_statistics() + + def send_get_table_column_statistics(self, db_name, tbl_name, col_name): + self._oprot.writeMessageBegin("get_table_column_statistics", TMessageType.CALL, self._seqid) + args = get_table_column_statistics_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.col_name = col_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_table_column_statistics(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_table_column_statistics_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_column_statistics failed: unknown result") + + def get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): + """ + Parameters: + - db_name + - tbl_name + - part_name + - col_name + + """ + self.send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name) + return self.recv_get_partition_column_statistics() + + def send_get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): + self._oprot.writeMessageBegin("get_partition_column_statistics", TMessageType.CALL, self._seqid) + args = get_partition_column_statistics_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.part_name = part_name + args.col_name = col_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partition_column_statistics(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partition_column_statistics_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException( + TApplicationException.MISSING_RESULT, "get_partition_column_statistics failed: unknown result" + ) + + def get_table_statistics_req(self, request): + """ + Parameters: + - request + + """ + self.send_get_table_statistics_req(request) + return self.recv_get_table_statistics_req() + + def send_get_table_statistics_req(self, request): + self._oprot.writeMessageBegin("get_table_statistics_req", TMessageType.CALL, self._seqid) + args = get_table_statistics_req_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + 
self._oprot.trans.flush() + + def recv_get_table_statistics_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_table_statistics_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_statistics_req failed: unknown result") + + def get_partitions_statistics_req(self, request): + """ + Parameters: + - request + + """ + self.send_get_partitions_statistics_req(request) + return self.recv_get_partitions_statistics_req() + + def send_get_partitions_statistics_req(self, request): + self._oprot.writeMessageBegin("get_partitions_statistics_req", TMessageType.CALL, self._seqid) + args = get_partitions_statistics_req_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_statistics_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_statistics_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_statistics_req failed: unknown result") + + def get_aggr_stats_for(self, request): + """ + Parameters: + - request + + """ + self.send_get_aggr_stats_for(request) + return self.recv_get_aggr_stats_for() + + def send_get_aggr_stats_for(self, request): + self._oprot.writeMessageBegin("get_aggr_stats_for", TMessageType.CALL, self._seqid) + args = get_aggr_stats_for_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_aggr_stats_for(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_aggr_stats_for_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_aggr_stats_for failed: unknown result") + + def set_aggr_stats_for(self, request): + """ + Parameters: + - request + + """ + self.send_set_aggr_stats_for(request) + return self.recv_set_aggr_stats_for() + + def send_set_aggr_stats_for(self, request): + self._oprot.writeMessageBegin("set_aggr_stats_for", TMessageType.CALL, self._seqid) + args = set_aggr_stats_for_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_set_aggr_stats_for(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = set_aggr_stats_for_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not 
None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "set_aggr_stats_for failed: unknown result") + + def delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name, engine): + """ + Parameters: + - db_name + - tbl_name + - part_name + - col_name + - engine + + """ + self.send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name, engine) + return self.recv_delete_partition_column_statistics() + + def send_delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name, engine): + self._oprot.writeMessageBegin("delete_partition_column_statistics", TMessageType.CALL, self._seqid) + args = delete_partition_column_statistics_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.part_name = part_name + args.col_name = col_name + args.engine = engine + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_delete_partition_column_statistics(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = delete_partition_column_statistics_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException( + TApplicationException.MISSING_RESULT, "delete_partition_column_statistics failed: unknown result" + ) + + def delete_table_column_statistics(self, db_name, tbl_name, col_name, engine): + """ + Parameters: + - db_name + - tbl_name + - col_name + - engine + + """ + self.send_delete_table_column_statistics(db_name, tbl_name, col_name, engine) + return self.recv_delete_table_column_statistics() + + def send_delete_table_column_statistics(self, db_name, tbl_name, col_name, engine): + self._oprot.writeMessageBegin("delete_table_column_statistics", TMessageType.CALL, self._seqid) + args = delete_table_column_statistics_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.col_name = col_name + args.engine = engine + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_delete_table_column_statistics(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = delete_table_column_statistics_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "delete_table_column_statistics failed: unknown result") + + def create_function(self, func): + """ + Parameters: + - func + + """ + self.send_create_function(func) + self.recv_create_function() + + def send_create_function(self, func): + self._oprot.writeMessageBegin("create_function", TMessageType.CALL, self._seqid) + args 
= create_function_args() + args.func = func + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_function(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_function_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + return + + def drop_function(self, dbName, funcName): + """ + Parameters: + - dbName + - funcName + + """ + self.send_drop_function(dbName, funcName) + self.recv_drop_function() + + def send_drop_function(self, dbName, funcName): + self._oprot.writeMessageBegin("drop_function", TMessageType.CALL, self._seqid) + args = drop_function_args() + args.dbName = dbName + args.funcName = funcName + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_function(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_function_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o3 is not None: + raise result.o3 + return + + def alter_function(self, dbName, funcName, newFunc): + """ + Parameters: + - dbName + - funcName + - newFunc + + """ + self.send_alter_function(dbName, funcName, newFunc) + self.recv_alter_function() + + def send_alter_function(self, dbName, funcName, newFunc): + self._oprot.writeMessageBegin("alter_function", TMessageType.CALL, self._seqid) + args = alter_function_args() + args.dbName = dbName + args.funcName = funcName + args.newFunc = newFunc + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_function(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_function_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def get_functions(self, dbName, pattern): + """ + Parameters: + - dbName + - pattern + + """ + self.send_get_functions(dbName, pattern) + return self.recv_get_functions() + + def send_get_functions(self, dbName, pattern): + self._oprot.writeMessageBegin("get_functions", TMessageType.CALL, self._seqid) + args = get_functions_args() + args.dbName = dbName + args.pattern = pattern + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_functions(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_functions_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_functions failed: unknown result") + + def get_function(self, dbName, funcName): + """ + Parameters: + - dbName + - funcName + + """ + 
self.send_get_function(dbName, funcName) + return self.recv_get_function() + + def send_get_function(self, dbName, funcName): + self._oprot.writeMessageBegin("get_function", TMessageType.CALL, self._seqid) + args = get_function_args() + args.dbName = dbName + args.funcName = funcName + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_function(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_function_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_function failed: unknown result") + + def get_all_functions(self): + self.send_get_all_functions() + return self.recv_get_all_functions() + + def send_get_all_functions(self): + self._oprot.writeMessageBegin("get_all_functions", TMessageType.CALL, self._seqid) + args = get_all_functions_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_all_functions(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_all_functions_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_functions failed: unknown result") + + def create_role(self, role): + """ + Parameters: + - role + + """ + self.send_create_role(role) + return self.recv_create_role() + + def send_create_role(self, role): + self._oprot.writeMessageBegin("create_role", TMessageType.CALL, self._seqid) + args = create_role_args() + args.role = role + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_role(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_role_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "create_role failed: unknown result") + + def drop_role(self, role_name): + """ + Parameters: + - role_name + + """ + self.send_drop_role(role_name) + return self.recv_drop_role() + + def send_drop_role(self, role_name): + self._oprot.writeMessageBegin("drop_role", TMessageType.CALL, self._seqid) + args = drop_role_args() + args.role_name = role_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_role(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_role_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise 
TApplicationException(TApplicationException.MISSING_RESULT, "drop_role failed: unknown result") + + def get_role_names(self): + self.send_get_role_names() + return self.recv_get_role_names() + + def send_get_role_names(self): + self._oprot.writeMessageBegin("get_role_names", TMessageType.CALL, self._seqid) + args = get_role_names_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_role_names(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_role_names_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_role_names failed: unknown result") + + def grant_role(self, role_name, principal_name, principal_type, grantor, grantorType, grant_option): + """ + Parameters: + - role_name + - principal_name + - principal_type + - grantor + - grantorType + - grant_option + + """ + self.send_grant_role(role_name, principal_name, principal_type, grantor, grantorType, grant_option) + return self.recv_grant_role() + + def send_grant_role(self, role_name, principal_name, principal_type, grantor, grantorType, grant_option): + self._oprot.writeMessageBegin("grant_role", TMessageType.CALL, self._seqid) + args = grant_role_args() + args.role_name = role_name + args.principal_name = principal_name + args.principal_type = principal_type + args.grantor = grantor + args.grantorType = grantorType + args.grant_option = grant_option + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_grant_role(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = grant_role_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "grant_role failed: unknown result") + + def revoke_role(self, role_name, principal_name, principal_type): + """ + Parameters: + - role_name + - principal_name + - principal_type + + """ + self.send_revoke_role(role_name, principal_name, principal_type) + return self.recv_revoke_role() + + def send_revoke_role(self, role_name, principal_name, principal_type): + self._oprot.writeMessageBegin("revoke_role", TMessageType.CALL, self._seqid) + args = revoke_role_args() + args.role_name = role_name + args.principal_name = principal_name + args.principal_type = principal_type + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_revoke_role(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = revoke_role_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "revoke_role failed: unknown result") + + def list_roles(self, principal_name, principal_type): + """ + Parameters: + - principal_name 
+ - principal_type + + """ + self.send_list_roles(principal_name, principal_type) + return self.recv_list_roles() + + def send_list_roles(self, principal_name, principal_type): + self._oprot.writeMessageBegin("list_roles", TMessageType.CALL, self._seqid) + args = list_roles_args() + args.principal_name = principal_name + args.principal_type = principal_type + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_list_roles(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = list_roles_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "list_roles failed: unknown result") + + def grant_revoke_role(self, request): + """ + Parameters: + - request + + """ + self.send_grant_revoke_role(request) + return self.recv_grant_revoke_role() + + def send_grant_revoke_role(self, request): + self._oprot.writeMessageBegin("grant_revoke_role", TMessageType.CALL, self._seqid) + args = grant_revoke_role_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_grant_revoke_role(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = grant_revoke_role_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "grant_revoke_role failed: unknown result") + + def get_principals_in_role(self, request): + """ + Parameters: + - request + + """ + self.send_get_principals_in_role(request) + return self.recv_get_principals_in_role() + + def send_get_principals_in_role(self, request): + self._oprot.writeMessageBegin("get_principals_in_role", TMessageType.CALL, self._seqid) + args = get_principals_in_role_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_principals_in_role(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_principals_in_role_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_principals_in_role failed: unknown result") + + def get_role_grants_for_principal(self, request): + """ + Parameters: + - request + + """ + self.send_get_role_grants_for_principal(request) + return self.recv_get_role_grants_for_principal() + + def send_get_role_grants_for_principal(self, request): + self._oprot.writeMessageBegin("get_role_grants_for_principal", TMessageType.CALL, self._seqid) + args = get_role_grants_for_principal_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_role_grants_for_principal(self): + iprot = self._iprot + (fname, mtype, rseqid) = 
iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_role_grants_for_principal_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_role_grants_for_principal failed: unknown result") + + def get_privilege_set(self, hiveObject, user_name, group_names): + """ + Parameters: + - hiveObject + - user_name + - group_names + + """ + self.send_get_privilege_set(hiveObject, user_name, group_names) + return self.recv_get_privilege_set() + + def send_get_privilege_set(self, hiveObject, user_name, group_names): + self._oprot.writeMessageBegin("get_privilege_set", TMessageType.CALL, self._seqid) + args = get_privilege_set_args() + args.hiveObject = hiveObject + args.user_name = user_name + args.group_names = group_names + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_privilege_set(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_privilege_set_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_privilege_set failed: unknown result") + + def list_privileges(self, principal_name, principal_type, hiveObject): + """ + Parameters: + - principal_name + - principal_type + - hiveObject + + """ + self.send_list_privileges(principal_name, principal_type, hiveObject) + return self.recv_list_privileges() + + def send_list_privileges(self, principal_name, principal_type, hiveObject): + self._oprot.writeMessageBegin("list_privileges", TMessageType.CALL, self._seqid) + args = list_privileges_args() + args.principal_name = principal_name + args.principal_type = principal_type + args.hiveObject = hiveObject + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_list_privileges(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = list_privileges_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "list_privileges failed: unknown result") + + def grant_privileges(self, privileges): + """ + Parameters: + - privileges + + """ + self.send_grant_privileges(privileges) + return self.recv_grant_privileges() + + def send_grant_privileges(self, privileges): + self._oprot.writeMessageBegin("grant_privileges", TMessageType.CALL, self._seqid) + args = grant_privileges_args() + args.privileges = privileges + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_grant_privileges(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = grant_privileges_result() + result.read(iprot) + 
iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "grant_privileges failed: unknown result") + + def revoke_privileges(self, privileges): + """ + Parameters: + - privileges + + """ + self.send_revoke_privileges(privileges) + return self.recv_revoke_privileges() + + def send_revoke_privileges(self, privileges): + self._oprot.writeMessageBegin("revoke_privileges", TMessageType.CALL, self._seqid) + args = revoke_privileges_args() + args.privileges = privileges + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_revoke_privileges(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = revoke_privileges_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "revoke_privileges failed: unknown result") + + def grant_revoke_privileges(self, request): + """ + Parameters: + - request + + """ + self.send_grant_revoke_privileges(request) + return self.recv_grant_revoke_privileges() + + def send_grant_revoke_privileges(self, request): + self._oprot.writeMessageBegin("grant_revoke_privileges", TMessageType.CALL, self._seqid) + args = grant_revoke_privileges_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_grant_revoke_privileges(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = grant_revoke_privileges_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "grant_revoke_privileges failed: unknown result") + + def refresh_privileges(self, objToRefresh, authorizer, grantRequest): + """ + Parameters: + - objToRefresh + - authorizer + - grantRequest + + """ + self.send_refresh_privileges(objToRefresh, authorizer, grantRequest) + return self.recv_refresh_privileges() + + def send_refresh_privileges(self, objToRefresh, authorizer, grantRequest): + self._oprot.writeMessageBegin("refresh_privileges", TMessageType.CALL, self._seqid) + args = refresh_privileges_args() + args.objToRefresh = objToRefresh + args.authorizer = authorizer + args.grantRequest = grantRequest + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_refresh_privileges(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = refresh_privileges_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "refresh_privileges failed: unknown result") + + def set_ugi(self, user_name, group_names): + """ + Parameters: + - user_name + - group_names + + """ + 
self.send_set_ugi(user_name, group_names) + return self.recv_set_ugi() + + def send_set_ugi(self, user_name, group_names): + self._oprot.writeMessageBegin("set_ugi", TMessageType.CALL, self._seqid) + args = set_ugi_args() + args.user_name = user_name + args.group_names = group_names + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_set_ugi(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = set_ugi_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "set_ugi failed: unknown result") + + def get_delegation_token(self, token_owner, renewer_kerberos_principal_name): + """ + Parameters: + - token_owner + - renewer_kerberos_principal_name + + """ + self.send_get_delegation_token(token_owner, renewer_kerberos_principal_name) + return self.recv_get_delegation_token() + + def send_get_delegation_token(self, token_owner, renewer_kerberos_principal_name): + self._oprot.writeMessageBegin("get_delegation_token", TMessageType.CALL, self._seqid) + args = get_delegation_token_args() + args.token_owner = token_owner + args.renewer_kerberos_principal_name = renewer_kerberos_principal_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_delegation_token(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_delegation_token_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_delegation_token failed: unknown result") + + def renew_delegation_token(self, token_str_form): + """ + Parameters: + - token_str_form + + """ + self.send_renew_delegation_token(token_str_form) + return self.recv_renew_delegation_token() + + def send_renew_delegation_token(self, token_str_form): + self._oprot.writeMessageBegin("renew_delegation_token", TMessageType.CALL, self._seqid) + args = renew_delegation_token_args() + args.token_str_form = token_str_form + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_renew_delegation_token(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = renew_delegation_token_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "renew_delegation_token failed: unknown result") + + def cancel_delegation_token(self, token_str_form): + """ + Parameters: + - token_str_form + + """ + self.send_cancel_delegation_token(token_str_form) + self.recv_cancel_delegation_token() + + def send_cancel_delegation_token(self, token_str_form): + self._oprot.writeMessageBegin("cancel_delegation_token", TMessageType.CALL, self._seqid) + args = cancel_delegation_token_args() + args.token_str_form 
= token_str_form + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_cancel_delegation_token(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = cancel_delegation_token_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def add_token(self, token_identifier, delegation_token): + """ + Parameters: + - token_identifier + - delegation_token + + """ + self.send_add_token(token_identifier, delegation_token) + return self.recv_add_token() + + def send_add_token(self, token_identifier, delegation_token): + self._oprot.writeMessageBegin("add_token", TMessageType.CALL, self._seqid) + args = add_token_args() + args.token_identifier = token_identifier + args.delegation_token = delegation_token + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_token(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_token_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "add_token failed: unknown result") + + def remove_token(self, token_identifier): + """ + Parameters: + - token_identifier + + """ + self.send_remove_token(token_identifier) + return self.recv_remove_token() + + def send_remove_token(self, token_identifier): + self._oprot.writeMessageBegin("remove_token", TMessageType.CALL, self._seqid) + args = remove_token_args() + args.token_identifier = token_identifier + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_remove_token(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = remove_token_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "remove_token failed: unknown result") + + def get_token(self, token_identifier): + """ + Parameters: + - token_identifier + + """ + self.send_get_token(token_identifier) + return self.recv_get_token() + + def send_get_token(self, token_identifier): + self._oprot.writeMessageBegin("get_token", TMessageType.CALL, self._seqid) + args = get_token_args() + args.token_identifier = token_identifier + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_token(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_token_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_token failed: unknown result") + + def get_all_token_identifiers(self): + self.send_get_all_token_identifiers() + return self.recv_get_all_token_identifiers() + + def send_get_all_token_identifiers(self): + 
self._oprot.writeMessageBegin("get_all_token_identifiers", TMessageType.CALL, self._seqid) + args = get_all_token_identifiers_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_all_token_identifiers(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_all_token_identifiers_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_token_identifiers failed: unknown result") + + def add_master_key(self, key): + """ + Parameters: + - key + + """ + self.send_add_master_key(key) + return self.recv_add_master_key() + + def send_add_master_key(self, key): + self._oprot.writeMessageBegin("add_master_key", TMessageType.CALL, self._seqid) + args = add_master_key_args() + args.key = key + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_master_key(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_master_key_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "add_master_key failed: unknown result") + + def update_master_key(self, seq_number, key): + """ + Parameters: + - seq_number + - key + + """ + self.send_update_master_key(seq_number, key) + self.recv_update_master_key() + + def send_update_master_key(self, seq_number, key): + self._oprot.writeMessageBegin("update_master_key", TMessageType.CALL, self._seqid) + args = update_master_key_args() + args.seq_number = seq_number + args.key = key + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_update_master_key(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = update_master_key_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def remove_master_key(self, key_seq): + """ + Parameters: + - key_seq + + """ + self.send_remove_master_key(key_seq) + return self.recv_remove_master_key() + + def send_remove_master_key(self, key_seq): + self._oprot.writeMessageBegin("remove_master_key", TMessageType.CALL, self._seqid) + args = remove_master_key_args() + args.key_seq = key_seq + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_remove_master_key(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = remove_master_key_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "remove_master_key failed: unknown result") + + def get_master_keys(self): + self.send_get_master_keys() + return 
self.recv_get_master_keys() + + def send_get_master_keys(self): + self._oprot.writeMessageBegin("get_master_keys", TMessageType.CALL, self._seqid) + args = get_master_keys_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_master_keys(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_master_keys_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_master_keys failed: unknown result") + + def get_open_txns(self): + self.send_get_open_txns() + return self.recv_get_open_txns() + + def send_get_open_txns(self): + self._oprot.writeMessageBegin("get_open_txns", TMessageType.CALL, self._seqid) + args = get_open_txns_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_open_txns(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_open_txns_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_open_txns failed: unknown result") + + def get_open_txns_info(self): + self.send_get_open_txns_info() + return self.recv_get_open_txns_info() + + def send_get_open_txns_info(self): + self._oprot.writeMessageBegin("get_open_txns_info", TMessageType.CALL, self._seqid) + args = get_open_txns_info_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_open_txns_info(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_open_txns_info_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_open_txns_info failed: unknown result") + + def open_txns(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_open_txns(rqst) + return self.recv_open_txns() + + def send_open_txns(self, rqst): + self._oprot.writeMessageBegin("open_txns", TMessageType.CALL, self._seqid) + args = open_txns_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_open_txns(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = open_txns_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "open_txns failed: unknown result") + + def abort_txn(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_abort_txn(rqst) + self.recv_abort_txn() + + def send_abort_txn(self, rqst): + self._oprot.writeMessageBegin("abort_txn", TMessageType.CALL, self._seqid) + args = abort_txn_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + 
self._oprot.trans.flush() + + def recv_abort_txn(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = abort_txn_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def abort_txns(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_abort_txns(rqst) + self.recv_abort_txns() + + def send_abort_txns(self, rqst): + self._oprot.writeMessageBegin("abort_txns", TMessageType.CALL, self._seqid) + args = abort_txns_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_abort_txns(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = abort_txns_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def commit_txn(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_commit_txn(rqst) + self.recv_commit_txn() + + def send_commit_txn(self, rqst): + self._oprot.writeMessageBegin("commit_txn", TMessageType.CALL, self._seqid) + args = commit_txn_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_commit_txn(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = commit_txn_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def get_latest_txnid_in_conflict(self, txnId): + """ + Parameters: + - txnId + + """ + self.send_get_latest_txnid_in_conflict(txnId) + return self.recv_get_latest_txnid_in_conflict() + + def send_get_latest_txnid_in_conflict(self, txnId): + self._oprot.writeMessageBegin("get_latest_txnid_in_conflict", TMessageType.CALL, self._seqid) + args = get_latest_txnid_in_conflict_args() + args.txnId = txnId + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_latest_txnid_in_conflict(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_latest_txnid_in_conflict_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_latest_txnid_in_conflict failed: unknown result") + + def repl_tbl_writeid_state(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_repl_tbl_writeid_state(rqst) + self.recv_repl_tbl_writeid_state() + + def send_repl_tbl_writeid_state(self, rqst): + self._oprot.writeMessageBegin("repl_tbl_writeid_state", TMessageType.CALL, self._seqid) + args = repl_tbl_writeid_state_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_repl_tbl_writeid_state(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = 
TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = repl_tbl_writeid_state_result() + result.read(iprot) + iprot.readMessageEnd() + return + + def get_valid_write_ids(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_get_valid_write_ids(rqst) + return self.recv_get_valid_write_ids() + + def send_get_valid_write_ids(self, rqst): + self._oprot.writeMessageBegin("get_valid_write_ids", TMessageType.CALL, self._seqid) + args = get_valid_write_ids_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_valid_write_ids(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_valid_write_ids_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_valid_write_ids failed: unknown result") + + def allocate_table_write_ids(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_allocate_table_write_ids(rqst) + return self.recv_allocate_table_write_ids() + + def send_allocate_table_write_ids(self, rqst): + self._oprot.writeMessageBegin("allocate_table_write_ids", TMessageType.CALL, self._seqid) + args = allocate_table_write_ids_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_allocate_table_write_ids(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = allocate_table_write_ids_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "allocate_table_write_ids failed: unknown result") + + def get_max_allocated_table_write_id(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_get_max_allocated_table_write_id(rqst) + return self.recv_get_max_allocated_table_write_id() + + def send_get_max_allocated_table_write_id(self, rqst): + self._oprot.writeMessageBegin("get_max_allocated_table_write_id", TMessageType.CALL, self._seqid) + args = get_max_allocated_table_write_id_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_max_allocated_table_write_id(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_max_allocated_table_write_id_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException( + TApplicationException.MISSING_RESULT, "get_max_allocated_table_write_id failed: unknown result" + ) + + def seed_write_id(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_seed_write_id(rqst) + self.recv_seed_write_id() + + def send_seed_write_id(self, rqst): + 
self._oprot.writeMessageBegin("seed_write_id", TMessageType.CALL, self._seqid) + args = seed_write_id_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_seed_write_id(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = seed_write_id_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def seed_txn_id(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_seed_txn_id(rqst) + self.recv_seed_txn_id() + + def send_seed_txn_id(self, rqst): + self._oprot.writeMessageBegin("seed_txn_id", TMessageType.CALL, self._seqid) + args = seed_txn_id_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_seed_txn_id(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = seed_txn_id_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def lock(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_lock(rqst) + return self.recv_lock() + + def send_lock(self, rqst): + self._oprot.writeMessageBegin("lock", TMessageType.CALL, self._seqid) + args = lock_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_lock(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = lock_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "lock failed: unknown result") + + def check_lock(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_check_lock(rqst) + return self.recv_check_lock() + + def send_check_lock(self, rqst): + self._oprot.writeMessageBegin("check_lock", TMessageType.CALL, self._seqid) + args = check_lock_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_check_lock(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = check_lock_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "check_lock failed: unknown result") + + def unlock(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_unlock(rqst) + self.recv_unlock() + + def send_unlock(self, rqst): + self._oprot.writeMessageBegin("unlock", TMessageType.CALL, self._seqid) + args = unlock_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_unlock(self): + iprot = 
self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = unlock_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def show_locks(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_show_locks(rqst) + return self.recv_show_locks() + + def send_show_locks(self, rqst): + self._oprot.writeMessageBegin("show_locks", TMessageType.CALL, self._seqid) + args = show_locks_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_show_locks(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = show_locks_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "show_locks failed: unknown result") + + def heartbeat(self, ids): + """ + Parameters: + - ids + + """ + self.send_heartbeat(ids) + self.recv_heartbeat() + + def send_heartbeat(self, ids): + self._oprot.writeMessageBegin("heartbeat", TMessageType.CALL, self._seqid) + args = heartbeat_args() + args.ids = ids + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_heartbeat(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = heartbeat_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def heartbeat_txn_range(self, txns): + """ + Parameters: + - txns + + """ + self.send_heartbeat_txn_range(txns) + return self.recv_heartbeat_txn_range() + + def send_heartbeat_txn_range(self, txns): + self._oprot.writeMessageBegin("heartbeat_txn_range", TMessageType.CALL, self._seqid) + args = heartbeat_txn_range_args() + args.txns = txns + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_heartbeat_txn_range(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = heartbeat_txn_range_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "heartbeat_txn_range failed: unknown result") + + def compact(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_compact(rqst) + self.recv_compact() + + def send_compact(self, rqst): + self._oprot.writeMessageBegin("compact", TMessageType.CALL, self._seqid) + args = compact_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_compact(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = compact_result() + result.read(iprot) + 
iprot.readMessageEnd() + return + + def compact2(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_compact2(rqst) + return self.recv_compact2() + + def send_compact2(self, rqst): + self._oprot.writeMessageBegin("compact2", TMessageType.CALL, self._seqid) + args = compact2_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_compact2(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = compact2_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "compact2 failed: unknown result") + + def show_compact(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_show_compact(rqst) + return self.recv_show_compact() + + def send_show_compact(self, rqst): + self._oprot.writeMessageBegin("show_compact", TMessageType.CALL, self._seqid) + args = show_compact_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_show_compact(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = show_compact_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "show_compact failed: unknown result") + + def add_dynamic_partitions(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_add_dynamic_partitions(rqst) + self.recv_add_dynamic_partitions() + + def send_add_dynamic_partitions(self, rqst): + self._oprot.writeMessageBegin("add_dynamic_partitions", TMessageType.CALL, self._seqid) + args = add_dynamic_partitions_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_dynamic_partitions(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_dynamic_partitions_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def find_next_compact(self, workerId): + """ + Parameters: + - workerId + + """ + self.send_find_next_compact(workerId) + return self.recv_find_next_compact() + + def send_find_next_compact(self, workerId): + self._oprot.writeMessageBegin("find_next_compact", TMessageType.CALL, self._seqid) + args = find_next_compact_args() + args.workerId = workerId + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_find_next_compact(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = find_next_compact_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "find_next_compact failed: unknown result") + 
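+    # NOTE: the client methods in this generated file all follow the same
+    # three-part Thrift convention illustrated by find_next_compact2 below:
+    # foo(...) delegates to send_foo(...), which serializes a foo_args struct
+    # as a TMessageType.CALL message and flushes the transport, and then to
+    # recv_foo(), which reads back either a TApplicationException or a
+    # foo_result struct whose optional fields map to a return value (success)
+    # or raised service exceptions (o1, o2, ...). Non-void calls raise
+    # TApplicationException.MISSING_RESULT when no result field is set.
+    #
+    # A minimal usage sketch (illustrative only; assumes a Hive Metastore
+    # reachable on localhost:9083 and Thrift's standard socket transport):
+    #
+    #   from thrift.protocol.TBinaryProtocol import TBinaryProtocol
+    #   from thrift.transport.TSocket import TSocket
+    #   from thrift.transport.TTransport import TBufferedTransport
+    #
+    #   transport = TBufferedTransport(TSocket("localhost", 9083))
+    #   transport.open()
+    #   client = Client(TBinaryProtocol(transport))
+    #   open_txns = client.get_open_txns()
+    #   transport.close()
+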
+ def find_next_compact2(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_find_next_compact2(rqst) + return self.recv_find_next_compact2() + + def send_find_next_compact2(self, rqst): + self._oprot.writeMessageBegin("find_next_compact2", TMessageType.CALL, self._seqid) + args = find_next_compact2_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_find_next_compact2(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = find_next_compact2_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "find_next_compact2 failed: unknown result") + + def update_compactor_state(self, cr, txn_id): + """ + Parameters: + - cr + - txn_id + + """ + self.send_update_compactor_state(cr, txn_id) + self.recv_update_compactor_state() + + def send_update_compactor_state(self, cr, txn_id): + self._oprot.writeMessageBegin("update_compactor_state", TMessageType.CALL, self._seqid) + args = update_compactor_state_args() + args.cr = cr + args.txn_id = txn_id + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_update_compactor_state(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = update_compactor_state_result() + result.read(iprot) + iprot.readMessageEnd() + return + + def find_columns_with_stats(self, cr): + """ + Parameters: + - cr + + """ + self.send_find_columns_with_stats(cr) + return self.recv_find_columns_with_stats() + + def send_find_columns_with_stats(self, cr): + self._oprot.writeMessageBegin("find_columns_with_stats", TMessageType.CALL, self._seqid) + args = find_columns_with_stats_args() + args.cr = cr + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_find_columns_with_stats(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = find_columns_with_stats_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "find_columns_with_stats failed: unknown result") + + def mark_cleaned(self, cr): + """ + Parameters: + - cr + + """ + self.send_mark_cleaned(cr) + self.recv_mark_cleaned() + + def send_mark_cleaned(self, cr): + self._oprot.writeMessageBegin("mark_cleaned", TMessageType.CALL, self._seqid) + args = mark_cleaned_args() + args.cr = cr + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_mark_cleaned(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = mark_cleaned_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def mark_compacted(self, cr): + """ + Parameters: + - cr + + """ + self.send_mark_compacted(cr) + 
self.recv_mark_compacted() + + def send_mark_compacted(self, cr): + self._oprot.writeMessageBegin("mark_compacted", TMessageType.CALL, self._seqid) + args = mark_compacted_args() + args.cr = cr + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_mark_compacted(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = mark_compacted_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def mark_failed(self, cr): + """ + Parameters: + - cr + + """ + self.send_mark_failed(cr) + self.recv_mark_failed() + + def send_mark_failed(self, cr): + self._oprot.writeMessageBegin("mark_failed", TMessageType.CALL, self._seqid) + args = mark_failed_args() + args.cr = cr + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_mark_failed(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = mark_failed_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def mark_refused(self, cr): + """ + Parameters: + - cr + + """ + self.send_mark_refused(cr) + self.recv_mark_refused() + + def send_mark_refused(self, cr): + self._oprot.writeMessageBegin("mark_refused", TMessageType.CALL, self._seqid) + args = mark_refused_args() + args.cr = cr + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_mark_refused(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = mark_refused_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def update_compaction_metrics_data(self, data): + """ + Parameters: + - data + + """ + self.send_update_compaction_metrics_data(data) + return self.recv_update_compaction_metrics_data() + + def send_update_compaction_metrics_data(self, data): + self._oprot.writeMessageBegin("update_compaction_metrics_data", TMessageType.CALL, self._seqid) + args = update_compaction_metrics_data_args() + args.data = data + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_update_compaction_metrics_data(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = update_compaction_metrics_data_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "update_compaction_metrics_data failed: unknown result") + + def remove_compaction_metrics_data(self, request): + """ + Parameters: + - request + + """ + self.send_remove_compaction_metrics_data(request) + self.recv_remove_compaction_metrics_data() + + def send_remove_compaction_metrics_data(self, request): + self._oprot.writeMessageBegin("remove_compaction_metrics_data", TMessageType.CALL, self._seqid) + args = 
remove_compaction_metrics_data_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_remove_compaction_metrics_data(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = remove_compaction_metrics_data_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def set_hadoop_jobid(self, jobId, cq_id): + """ + Parameters: + - jobId + - cq_id + + """ + self.send_set_hadoop_jobid(jobId, cq_id) + self.recv_set_hadoop_jobid() + + def send_set_hadoop_jobid(self, jobId, cq_id): + self._oprot.writeMessageBegin("set_hadoop_jobid", TMessageType.CALL, self._seqid) + args = set_hadoop_jobid_args() + args.jobId = jobId + args.cq_id = cq_id + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_set_hadoop_jobid(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = set_hadoop_jobid_result() + result.read(iprot) + iprot.readMessageEnd() + return + + def get_latest_committed_compaction_info(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_get_latest_committed_compaction_info(rqst) + return self.recv_get_latest_committed_compaction_info() + + def send_get_latest_committed_compaction_info(self, rqst): + self._oprot.writeMessageBegin("get_latest_committed_compaction_info", TMessageType.CALL, self._seqid) + args = get_latest_committed_compaction_info_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_latest_committed_compaction_info(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_latest_committed_compaction_info_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException( + TApplicationException.MISSING_RESULT, "get_latest_committed_compaction_info failed: unknown result" + ) + + def get_next_notification(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_get_next_notification(rqst) + return self.recv_get_next_notification() + + def send_get_next_notification(self, rqst): + self._oprot.writeMessageBegin("get_next_notification", TMessageType.CALL, self._seqid) + args = get_next_notification_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_next_notification(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_next_notification_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_next_notification failed: unknown result") + + def get_current_notificationEventId(self): + self.send_get_current_notificationEventId() + return self.recv_get_current_notificationEventId() + + def send_get_current_notificationEventId(self): + 
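+ # Note: even this argument-less call serializes an (empty) args struct
+ # below, presumably so the request framing stays uniform across methods.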
self._oprot.writeMessageBegin("get_current_notificationEventId", TMessageType.CALL, self._seqid) + args = get_current_notificationEventId_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_current_notificationEventId(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_current_notificationEventId_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException( + TApplicationException.MISSING_RESULT, "get_current_notificationEventId failed: unknown result" + ) + + def get_notification_events_count(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_get_notification_events_count(rqst) + return self.recv_get_notification_events_count() + + def send_get_notification_events_count(self, rqst): + self._oprot.writeMessageBegin("get_notification_events_count", TMessageType.CALL, self._seqid) + args = get_notification_events_count_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_notification_events_count(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_notification_events_count_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_notification_events_count failed: unknown result") + + def fire_listener_event(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_fire_listener_event(rqst) + return self.recv_fire_listener_event() + + def send_fire_listener_event(self, rqst): + self._oprot.writeMessageBegin("fire_listener_event", TMessageType.CALL, self._seqid) + args = fire_listener_event_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_fire_listener_event(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = fire_listener_event_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "fire_listener_event failed: unknown result") + + def flushCache(self): + self.send_flushCache() + self.recv_flushCache() + + def send_flushCache(self): + self._oprot.writeMessageBegin("flushCache", TMessageType.CALL, self._seqid) + args = flushCache_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_flushCache(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = flushCache_result() + result.read(iprot) + iprot.readMessageEnd() + return + + def add_write_notification_log(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_add_write_notification_log(rqst) + return self.recv_add_write_notification_log() + + def send_add_write_notification_log(self, rqst): + 
self._oprot.writeMessageBegin("add_write_notification_log", TMessageType.CALL, self._seqid) + args = add_write_notification_log_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_write_notification_log(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_write_notification_log_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "add_write_notification_log failed: unknown result") + + def add_write_notification_log_in_batch(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_add_write_notification_log_in_batch(rqst) + return self.recv_add_write_notification_log_in_batch() + + def send_add_write_notification_log_in_batch(self, rqst): + self._oprot.writeMessageBegin("add_write_notification_log_in_batch", TMessageType.CALL, self._seqid) + args = add_write_notification_log_in_batch_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_write_notification_log_in_batch(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_write_notification_log_in_batch_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException( + TApplicationException.MISSING_RESULT, "add_write_notification_log_in_batch failed: unknown result" + ) + + def cm_recycle(self, request): + """ + Parameters: + - request + + """ + self.send_cm_recycle(request) + return self.recv_cm_recycle() + + def send_cm_recycle(self, request): + self._oprot.writeMessageBegin("cm_recycle", TMessageType.CALL, self._seqid) + args = cm_recycle_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_cm_recycle(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = cm_recycle_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "cm_recycle failed: unknown result") + + def get_file_metadata_by_expr(self, req): + """ + Parameters: + - req + + """ + self.send_get_file_metadata_by_expr(req) + return self.recv_get_file_metadata_by_expr() + + def send_get_file_metadata_by_expr(self, req): + self._oprot.writeMessageBegin("get_file_metadata_by_expr", TMessageType.CALL, self._seqid) + args = get_file_metadata_by_expr_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_file_metadata_by_expr(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_file_metadata_by_expr_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + 
return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result") + + def get_file_metadata(self, req): + """ + Parameters: + - req + + """ + self.send_get_file_metadata(req) + return self.recv_get_file_metadata() + + def send_get_file_metadata(self, req): + self._oprot.writeMessageBegin("get_file_metadata", TMessageType.CALL, self._seqid) + args = get_file_metadata_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_file_metadata(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_file_metadata_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_file_metadata failed: unknown result") + + def put_file_metadata(self, req): + """ + Parameters: + - req + + """ + self.send_put_file_metadata(req) + return self.recv_put_file_metadata() + + def send_put_file_metadata(self, req): + self._oprot.writeMessageBegin("put_file_metadata", TMessageType.CALL, self._seqid) + args = put_file_metadata_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_put_file_metadata(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = put_file_metadata_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "put_file_metadata failed: unknown result") + + def clear_file_metadata(self, req): + """ + Parameters: + - req + + """ + self.send_clear_file_metadata(req) + return self.recv_clear_file_metadata() + + def send_clear_file_metadata(self, req): + self._oprot.writeMessageBegin("clear_file_metadata", TMessageType.CALL, self._seqid) + args = clear_file_metadata_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_clear_file_metadata(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = clear_file_metadata_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "clear_file_metadata failed: unknown result") + + def cache_file_metadata(self, req): + """ + Parameters: + - req + + """ + self.send_cache_file_metadata(req) + return self.recv_cache_file_metadata() + + def send_cache_file_metadata(self, req): + self._oprot.writeMessageBegin("cache_file_metadata", TMessageType.CALL, self._seqid) + args = cache_file_metadata_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_cache_file_metadata(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = cache_file_metadata_result() + 
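+ # The *_result struct is the deserialization target for the reply and
+ # carries at most one populated field: success or a declared exception.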
result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "cache_file_metadata failed: unknown result") + + def get_metastore_db_uuid(self): + self.send_get_metastore_db_uuid() + return self.recv_get_metastore_db_uuid() + + def send_get_metastore_db_uuid(self): + self._oprot.writeMessageBegin("get_metastore_db_uuid", TMessageType.CALL, self._seqid) + args = get_metastore_db_uuid_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_metastore_db_uuid(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_metastore_db_uuid_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_metastore_db_uuid failed: unknown result") + + def create_resource_plan(self, request): + """ + Parameters: + - request + + """ + self.send_create_resource_plan(request) + return self.recv_create_resource_plan() + + def send_create_resource_plan(self, request): + self._oprot.writeMessageBegin("create_resource_plan", TMessageType.CALL, self._seqid) + args = create_resource_plan_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_resource_plan(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_resource_plan_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "create_resource_plan failed: unknown result") + + def get_resource_plan(self, request): + """ + Parameters: + - request + + """ + self.send_get_resource_plan(request) + return self.recv_get_resource_plan() + + def send_get_resource_plan(self, request): + self._oprot.writeMessageBegin("get_resource_plan", TMessageType.CALL, self._seqid) + args = get_resource_plan_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_resource_plan(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_resource_plan_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_resource_plan failed: unknown result") + + def get_active_resource_plan(self, request): + """ + Parameters: + - request + + """ + self.send_get_active_resource_plan(request) + return self.recv_get_active_resource_plan() + + def send_get_active_resource_plan(self, request): + self._oprot.writeMessageBegin("get_active_resource_plan", TMessageType.CALL, self._seqid) 
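+ # The name written into the message header must match the IDL method
+ # name; the metastore dispatches on that string, not on anything positional.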
+ args = get_active_resource_plan_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_active_resource_plan(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_active_resource_plan_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_active_resource_plan failed: unknown result") + + def get_all_resource_plans(self, request): + """ + Parameters: + - request + + """ + self.send_get_all_resource_plans(request) + return self.recv_get_all_resource_plans() + + def send_get_all_resource_plans(self, request): + self._oprot.writeMessageBegin("get_all_resource_plans", TMessageType.CALL, self._seqid) + args = get_all_resource_plans_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_all_resource_plans(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_all_resource_plans_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_resource_plans failed: unknown result") + + def alter_resource_plan(self, request): + """ + Parameters: + - request + + """ + self.send_alter_resource_plan(request) + return self.recv_alter_resource_plan() + + def send_alter_resource_plan(self, request): + self._oprot.writeMessageBegin("alter_resource_plan", TMessageType.CALL, self._seqid) + args = alter_resource_plan_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_resource_plan(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_resource_plan_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "alter_resource_plan failed: unknown result") + + def validate_resource_plan(self, request): + """ + Parameters: + - request + + """ + self.send_validate_resource_plan(request) + return self.recv_validate_resource_plan() + + def send_validate_resource_plan(self, request): + self._oprot.writeMessageBegin("validate_resource_plan", TMessageType.CALL, self._seqid) + args = validate_resource_plan_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_validate_resource_plan(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = validate_resource_plan_result() + 
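+ # The numbered oN fields correspond to the exceptions declared for this
+ # call in the IDL; whichever one comes back set is re-raised locally.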
result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "validate_resource_plan failed: unknown result") + + def drop_resource_plan(self, request): + """ + Parameters: + - request + + """ + self.send_drop_resource_plan(request) + return self.recv_drop_resource_plan() + + def send_drop_resource_plan(self, request): + self._oprot.writeMessageBegin("drop_resource_plan", TMessageType.CALL, self._seqid) + args = drop_resource_plan_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_resource_plan(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_resource_plan_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_resource_plan failed: unknown result") + + def create_wm_trigger(self, request): + """ + Parameters: + - request + + """ + self.send_create_wm_trigger(request) + return self.recv_create_wm_trigger() + + def send_create_wm_trigger(self, request): + self._oprot.writeMessageBegin("create_wm_trigger", TMessageType.CALL, self._seqid) + args = create_wm_trigger_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_wm_trigger(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_wm_trigger_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "create_wm_trigger failed: unknown result") + + def alter_wm_trigger(self, request): + """ + Parameters: + - request + + """ + self.send_alter_wm_trigger(request) + return self.recv_alter_wm_trigger() + + def send_alter_wm_trigger(self, request): + self._oprot.writeMessageBegin("alter_wm_trigger", TMessageType.CALL, self._seqid) + args = alter_wm_trigger_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_wm_trigger(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_wm_trigger_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "alter_wm_trigger failed: unknown result") + + def 
drop_wm_trigger(self, request): + """ + Parameters: + - request + + """ + self.send_drop_wm_trigger(request) + return self.recv_drop_wm_trigger() + + def send_drop_wm_trigger(self, request): + self._oprot.writeMessageBegin("drop_wm_trigger", TMessageType.CALL, self._seqid) + args = drop_wm_trigger_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_wm_trigger(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_wm_trigger_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_wm_trigger failed: unknown result") + + def get_triggers_for_resourceplan(self, request): + """ + Parameters: + - request + + """ + self.send_get_triggers_for_resourceplan(request) + return self.recv_get_triggers_for_resourceplan() + + def send_get_triggers_for_resourceplan(self, request): + self._oprot.writeMessageBegin("get_triggers_for_resourceplan", TMessageType.CALL, self._seqid) + args = get_triggers_for_resourceplan_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_triggers_for_resourceplan(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_triggers_for_resourceplan_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_triggers_for_resourceplan failed: unknown result") + + def create_wm_pool(self, request): + """ + Parameters: + - request + + """ + self.send_create_wm_pool(request) + return self.recv_create_wm_pool() + + def send_create_wm_pool(self, request): + self._oprot.writeMessageBegin("create_wm_pool", TMessageType.CALL, self._seqid) + args = create_wm_pool_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_wm_pool(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_wm_pool_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "create_wm_pool failed: unknown result") + + def alter_wm_pool(self, request): + """ + Parameters: + - request + + """ + self.send_alter_wm_pool(request) + return self.recv_alter_wm_pool() + + def send_alter_wm_pool(self, request): + self._oprot.writeMessageBegin("alter_wm_pool", TMessageType.CALL, self._seqid) + args = alter_wm_pool_args() + args.request = 
request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_wm_pool(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_wm_pool_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "alter_wm_pool failed: unknown result") + + def drop_wm_pool(self, request): + """ + Parameters: + - request + + """ + self.send_drop_wm_pool(request) + return self.recv_drop_wm_pool() + + def send_drop_wm_pool(self, request): + self._oprot.writeMessageBegin("drop_wm_pool", TMessageType.CALL, self._seqid) + args = drop_wm_pool_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_wm_pool(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_wm_pool_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_wm_pool failed: unknown result") + + def create_or_update_wm_mapping(self, request): + """ + Parameters: + - request + + """ + self.send_create_or_update_wm_mapping(request) + return self.recv_create_or_update_wm_mapping() + + def send_create_or_update_wm_mapping(self, request): + self._oprot.writeMessageBegin("create_or_update_wm_mapping", TMessageType.CALL, self._seqid) + args = create_or_update_wm_mapping_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_or_update_wm_mapping(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_or_update_wm_mapping_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "create_or_update_wm_mapping failed: unknown result") + + def drop_wm_mapping(self, request): + """ + Parameters: + - request + + """ + self.send_drop_wm_mapping(request) + return self.recv_drop_wm_mapping() + + def send_drop_wm_mapping(self, request): + self._oprot.writeMessageBegin("drop_wm_mapping", TMessageType.CALL, self._seqid) + args = drop_wm_mapping_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_wm_mapping(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = 
TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_wm_mapping_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_wm_mapping failed: unknown result") + + def create_or_drop_wm_trigger_to_pool_mapping(self, request): + """ + Parameters: + - request + + """ + self.send_create_or_drop_wm_trigger_to_pool_mapping(request) + return self.recv_create_or_drop_wm_trigger_to_pool_mapping() + + def send_create_or_drop_wm_trigger_to_pool_mapping(self, request): + self._oprot.writeMessageBegin("create_or_drop_wm_trigger_to_pool_mapping", TMessageType.CALL, self._seqid) + args = create_or_drop_wm_trigger_to_pool_mapping_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_or_drop_wm_trigger_to_pool_mapping(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_or_drop_wm_trigger_to_pool_mapping_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException( + TApplicationException.MISSING_RESULT, "create_or_drop_wm_trigger_to_pool_mapping failed: unknown result" + ) + + def create_ischema(self, schema): + """ + Parameters: + - schema + + """ + self.send_create_ischema(schema) + self.recv_create_ischema() + + def send_create_ischema(self, schema): + self._oprot.writeMessageBegin("create_ischema", TMessageType.CALL, self._seqid) + args = create_ischema_args() + args.schema = schema + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_ischema(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_ischema_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def alter_ischema(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_alter_ischema(rqst) + self.recv_alter_ischema() + + def send_alter_ischema(self, rqst): + self._oprot.writeMessageBegin("alter_ischema", TMessageType.CALL, self._seqid) + args = alter_ischema_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_ischema(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_ischema_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def get_ischema(self, name): + """ + Parameters: + - name + + """ + self.send_get_ischema(name) + 
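+ # Getter wrappers return the recv_* value; the void wrappers in this
+ # file invoke recv_* purely for its exception checks and return nothing.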
return self.recv_get_ischema() + + def send_get_ischema(self, name): + self._oprot.writeMessageBegin("get_ischema", TMessageType.CALL, self._seqid) + args = get_ischema_args() + args.name = name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_ischema(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_ischema_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_ischema failed: unknown result") + + def drop_ischema(self, name): + """ + Parameters: + - name + + """ + self.send_drop_ischema(name) + self.recv_drop_ischema() + + def send_drop_ischema(self, name): + self._oprot.writeMessageBegin("drop_ischema", TMessageType.CALL, self._seqid) + args = drop_ischema_args() + args.name = name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_ischema(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_ischema_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def add_schema_version(self, schemaVersion): + """ + Parameters: + - schemaVersion + + """ + self.send_add_schema_version(schemaVersion) + self.recv_add_schema_version() + + def send_add_schema_version(self, schemaVersion): + self._oprot.writeMessageBegin("add_schema_version", TMessageType.CALL, self._seqid) + args = add_schema_version_args() + args.schemaVersion = schemaVersion + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_schema_version(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_schema_version_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def get_schema_version(self, schemaVersion): + """ + Parameters: + - schemaVersion + + """ + self.send_get_schema_version(schemaVersion) + return self.recv_get_schema_version() + + def send_get_schema_version(self, schemaVersion): + self._oprot.writeMessageBegin("get_schema_version", TMessageType.CALL, self._seqid) + args = get_schema_version_args() + args.schemaVersion = schemaVersion + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_schema_version(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_schema_version_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 
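+ # Falling through to here means the reply carried neither a value nor a
+ # declared exception, which the client reports as MISSING_RESULT.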
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema_version failed: unknown result") + + def get_schema_latest_version(self, schemaName): + """ + Parameters: + - schemaName + + """ + self.send_get_schema_latest_version(schemaName) + return self.recv_get_schema_latest_version() + + def send_get_schema_latest_version(self, schemaName): + self._oprot.writeMessageBegin("get_schema_latest_version", TMessageType.CALL, self._seqid) + args = get_schema_latest_version_args() + args.schemaName = schemaName + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_schema_latest_version(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_schema_latest_version_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema_latest_version failed: unknown result") + + def get_schema_all_versions(self, schemaName): + """ + Parameters: + - schemaName + + """ + self.send_get_schema_all_versions(schemaName) + return self.recv_get_schema_all_versions() + + def send_get_schema_all_versions(self, schemaName): + self._oprot.writeMessageBegin("get_schema_all_versions", TMessageType.CALL, self._seqid) + args = get_schema_all_versions_args() + args.schemaName = schemaName + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_schema_all_versions(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_schema_all_versions_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema_all_versions failed: unknown result") + + def drop_schema_version(self, schemaVersion): + """ + Parameters: + - schemaVersion + + """ + self.send_drop_schema_version(schemaVersion) + self.recv_drop_schema_version() + + def send_drop_schema_version(self, schemaVersion): + self._oprot.writeMessageBegin("drop_schema_version", TMessageType.CALL, self._seqid) + args = drop_schema_version_args() + args.schemaVersion = schemaVersion + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_schema_version(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_schema_version_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def get_schemas_by_cols(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_get_schemas_by_cols(rqst) + return self.recv_get_schemas_by_cols() + + def send_get_schemas_by_cols(self, rqst): + self._oprot.writeMessageBegin("get_schemas_by_cols", TMessageType.CALL, self._seqid) + args = get_schemas_by_cols_args() + args.rqst = rqst + 
args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_schemas_by_cols(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_schemas_by_cols_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schemas_by_cols failed: unknown result") + + def map_schema_version_to_serde(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_map_schema_version_to_serde(rqst) + self.recv_map_schema_version_to_serde() + + def send_map_schema_version_to_serde(self, rqst): + self._oprot.writeMessageBegin("map_schema_version_to_serde", TMessageType.CALL, self._seqid) + args = map_schema_version_to_serde_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_map_schema_version_to_serde(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = map_schema_version_to_serde_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def set_schema_version_state(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_set_schema_version_state(rqst) + self.recv_set_schema_version_state() + + def send_set_schema_version_state(self, rqst): + self._oprot.writeMessageBegin("set_schema_version_state", TMessageType.CALL, self._seqid) + args = set_schema_version_state_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_set_schema_version_state(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = set_schema_version_state_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def add_serde(self, serde): + """ + Parameters: + - serde + + """ + self.send_add_serde(serde) + self.recv_add_serde() + + def send_add_serde(self, serde): + self._oprot.writeMessageBegin("add_serde", TMessageType.CALL, self._seqid) + args = add_serde_args() + args.serde = serde + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_serde(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_serde_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def get_serde(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_get_serde(rqst) + return self.recv_get_serde() + + def send_get_serde(self, rqst): + self._oprot.writeMessageBegin("get_serde", TMessageType.CALL, self._seqid) + args = get_serde_args() + args.rqst = rqst + args.write(self._oprot) + 
self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_serde(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_serde_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_serde failed: unknown result") + + def get_lock_materialization_rebuild(self, dbName, tableName, txnId): + """ + Parameters: + - dbName + - tableName + - txnId + + """ + self.send_get_lock_materialization_rebuild(dbName, tableName, txnId) + return self.recv_get_lock_materialization_rebuild() + + def send_get_lock_materialization_rebuild(self, dbName, tableName, txnId): + self._oprot.writeMessageBegin("get_lock_materialization_rebuild", TMessageType.CALL, self._seqid) + args = get_lock_materialization_rebuild_args() + args.dbName = dbName + args.tableName = tableName + args.txnId = txnId + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_lock_materialization_rebuild(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_lock_materialization_rebuild_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException( + TApplicationException.MISSING_RESULT, "get_lock_materialization_rebuild failed: unknown result" + ) + + def heartbeat_lock_materialization_rebuild(self, dbName, tableName, txnId): + """ + Parameters: + - dbName + - tableName + - txnId + + """ + self.send_heartbeat_lock_materialization_rebuild(dbName, tableName, txnId) + return self.recv_heartbeat_lock_materialization_rebuild() + + def send_heartbeat_lock_materialization_rebuild(self, dbName, tableName, txnId): + self._oprot.writeMessageBegin("heartbeat_lock_materialization_rebuild", TMessageType.CALL, self._seqid) + args = heartbeat_lock_materialization_rebuild_args() + args.dbName = dbName + args.tableName = tableName + args.txnId = txnId + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_heartbeat_lock_materialization_rebuild(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = heartbeat_lock_materialization_rebuild_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException( + TApplicationException.MISSING_RESULT, "heartbeat_lock_materialization_rebuild failed: unknown result" + ) + + def add_runtime_stats(self, stat): + """ + Parameters: + - stat + + """ + self.send_add_runtime_stats(stat) + self.recv_add_runtime_stats() + + def send_add_runtime_stats(self, stat): + self._oprot.writeMessageBegin("add_runtime_stats", TMessageType.CALL, self._seqid) + args = add_runtime_stats_args() + args.stat = stat + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_runtime_stats(self): + iprot = self._iprot + (fname, mtype, rseqid) = 
iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_runtime_stats_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def get_runtime_stats(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_get_runtime_stats(rqst) + return self.recv_get_runtime_stats() + + def send_get_runtime_stats(self, rqst): + self._oprot.writeMessageBegin("get_runtime_stats", TMessageType.CALL, self._seqid) + args = get_runtime_stats_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_runtime_stats(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_runtime_stats_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_runtime_stats failed: unknown result") + + def get_partitions_with_specs(self, request): + """ + Parameters: + - request + + """ + self.send_get_partitions_with_specs(request) + return self.recv_get_partitions_with_specs() + + def send_get_partitions_with_specs(self, request): + self._oprot.writeMessageBegin("get_partitions_with_specs", TMessageType.CALL, self._seqid) + args = get_partitions_with_specs_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_with_specs(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_with_specs_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_with_specs failed: unknown result") + + def scheduled_query_poll(self, request): + """ + Parameters: + - request + + """ + self.send_scheduled_query_poll(request) + return self.recv_scheduled_query_poll() + + def send_scheduled_query_poll(self, request): + self._oprot.writeMessageBegin("scheduled_query_poll", TMessageType.CALL, self._seqid) + args = scheduled_query_poll_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_scheduled_query_poll(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = scheduled_query_poll_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "scheduled_query_poll failed: unknown result") + + def scheduled_query_maintenance(self, request): + """ + Parameters: + - request + + """ + self.send_scheduled_query_maintenance(request) + self.recv_scheduled_query_maintenance() + + def send_scheduled_query_maintenance(self, request): + 
self._oprot.writeMessageBegin("scheduled_query_maintenance", TMessageType.CALL, self._seqid) + args = scheduled_query_maintenance_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_scheduled_query_maintenance(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = scheduled_query_maintenance_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + return + + def scheduled_query_progress(self, info): + """ + Parameters: + - info + + """ + self.send_scheduled_query_progress(info) + self.recv_scheduled_query_progress() + + def send_scheduled_query_progress(self, info): + self._oprot.writeMessageBegin("scheduled_query_progress", TMessageType.CALL, self._seqid) + args = scheduled_query_progress_args() + args.info = info + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_scheduled_query_progress(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = scheduled_query_progress_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def get_scheduled_query(self, scheduleKey): + """ + Parameters: + - scheduleKey + + """ + self.send_get_scheduled_query(scheduleKey) + return self.recv_get_scheduled_query() + + def send_get_scheduled_query(self, scheduleKey): + self._oprot.writeMessageBegin("get_scheduled_query", TMessageType.CALL, self._seqid) + args = get_scheduled_query_args() + args.scheduleKey = scheduleKey + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_scheduled_query(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_scheduled_query_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_scheduled_query failed: unknown result") + + def add_replication_metrics(self, replicationMetricList): + """ + Parameters: + - replicationMetricList + + """ + self.send_add_replication_metrics(replicationMetricList) + self.recv_add_replication_metrics() + + def send_add_replication_metrics(self, replicationMetricList): + self._oprot.writeMessageBegin("add_replication_metrics", TMessageType.CALL, self._seqid) + args = add_replication_metrics_args() + args.replicationMetricList = replicationMetricList + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_replication_metrics(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_replication_metrics_result() + 
result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def get_replication_metrics(self, rqst): + """ + Parameters: + - rqst + + """ + self.send_get_replication_metrics(rqst) + return self.recv_get_replication_metrics() + + def send_get_replication_metrics(self, rqst): + self._oprot.writeMessageBegin("get_replication_metrics", TMessageType.CALL, self._seqid) + args = get_replication_metrics_args() + args.rqst = rqst + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_replication_metrics(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_replication_metrics_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_replication_metrics failed: unknown result") + + def get_open_txns_req(self, getOpenTxnsRequest): + """ + Parameters: + - getOpenTxnsRequest + + """ + self.send_get_open_txns_req(getOpenTxnsRequest) + return self.recv_get_open_txns_req() + + def send_get_open_txns_req(self, getOpenTxnsRequest): + self._oprot.writeMessageBegin("get_open_txns_req", TMessageType.CALL, self._seqid) + args = get_open_txns_req_args() + args.getOpenTxnsRequest = getOpenTxnsRequest + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_open_txns_req(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_open_txns_req_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_open_txns_req failed: unknown result") + + def create_stored_procedure(self, proc): + """ + Parameters: + - proc + + """ + self.send_create_stored_procedure(proc) + self.recv_create_stored_procedure() + + def send_create_stored_procedure(self, proc): + self._oprot.writeMessageBegin("create_stored_procedure", TMessageType.CALL, self._seqid) + args = create_stored_procedure_args() + args.proc = proc + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_stored_procedure(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_stored_procedure_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + + def get_stored_procedure(self, request): + """ + Parameters: + - request + + """ + self.send_get_stored_procedure(request) + return self.recv_get_stored_procedure() + + def send_get_stored_procedure(self, request): + self._oprot.writeMessageBegin("get_stored_procedure", TMessageType.CALL, self._seqid) + args = get_stored_procedure_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_stored_procedure(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == 
TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_stored_procedure_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_stored_procedure failed: unknown result") + + def drop_stored_procedure(self, request): + """ + Parameters: + - request + + """ + self.send_drop_stored_procedure(request) + self.recv_drop_stored_procedure() + + def send_drop_stored_procedure(self, request): + self._oprot.writeMessageBegin("drop_stored_procedure", TMessageType.CALL, self._seqid) + args = drop_stored_procedure_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_stored_procedure(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_stored_procedure_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def get_all_stored_procedures(self, request): + """ + Parameters: + - request + + """ + self.send_get_all_stored_procedures(request) + return self.recv_get_all_stored_procedures() + + def send_get_all_stored_procedures(self, request): + self._oprot.writeMessageBegin("get_all_stored_procedures", TMessageType.CALL, self._seqid) + args = get_all_stored_procedures_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_all_stored_procedures(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_all_stored_procedures_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_stored_procedures failed: unknown result") + + def find_package(self, request): + """ + Parameters: + - request + + """ + self.send_find_package(request) + return self.recv_find_package() + + def send_find_package(self, request): + self._oprot.writeMessageBegin("find_package", TMessageType.CALL, self._seqid) + args = find_package_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_find_package(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = find_package_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "find_package failed: unknown result") + + def add_package(self, request): + """ + Parameters: + - request + + """ + self.send_add_package(request) + self.recv_add_package() + + def send_add_package(self, request): + self._oprot.writeMessageBegin("add_package", TMessageType.CALL, self._seqid) + 
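+ # NOTE: every generated client call follows this same wire pattern: the
+ # send_* half frames a CALL message (writeMessageBegin -> args struct ->
+ # writeMessageEnd -> flush), and the matching recv_* half reads back either
+ # a server-side TApplicationException or a *_result struct whose fields
+ # carry the declared exceptions (o1, o2, ...) and, for non-void methods,
+ # the return value in `success`. A minimal usage sketch, assuming the
+ # standard thrift-python transport/protocol classes (the host and port
+ # below are placeholders, not values mandated by this module):
+ #
+ #   from thrift.protocol import TBinaryProtocol
+ #   from thrift.transport import TSocket, TTransport
+ #
+ #   transport = TTransport.TBufferedTransport(TSocket.TSocket("localhost", 9083))
+ #   transport.open()
+ #   client = Client(TBinaryProtocol.TBinaryProtocol(transport))
+ #   databases = client.get_all_databases()  # plain send/recv round trip
+ #   transport.close()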
args = add_package_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_package(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_package_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def get_all_packages(self, request): + """ + Parameters: + - request + + """ + self.send_get_all_packages(request) + return self.recv_get_all_packages() + + def send_get_all_packages(self, request): + self._oprot.writeMessageBegin("get_all_packages", TMessageType.CALL, self._seqid) + args = get_all_packages_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_all_packages(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_all_packages_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_packages failed: unknown result") + + def drop_package(self, request): + """ + Parameters: + - request + + """ + self.send_drop_package(request) + self.recv_drop_package() + + def send_drop_package(self, request): + self._oprot.writeMessageBegin("drop_package", TMessageType.CALL, self._seqid) + args = drop_package_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_package(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_package_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + + def get_all_write_event_info(self, request): + """ + Parameters: + - request + + """ + self.send_get_all_write_event_info(request) + return self.recv_get_all_write_event_info() + + def send_get_all_write_event_info(self, request): + self._oprot.writeMessageBegin("get_all_write_event_info", TMessageType.CALL, self._seqid) + args = get_all_write_event_info_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_all_write_event_info(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_all_write_event_info_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_write_event_info failed: unknown result") + + +class Processor(fb303.FacebookService.Processor, Iface, TProcessor): + def __init__(self, handler): + fb303.FacebookService.Processor.__init__(self, handler) + self._processMap["getMetaConf"] = Processor.process_getMetaConf + self._processMap["setMetaConf"] = Processor.process_setMetaConf + 
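+ # NOTE: dispatch is purely by method name: each entry below maps the
+ # wire-level name returned by readMessageBegin() to an unbound process_*
+ # function, mirroring the client methods one-to-one. Names absent from
+ # this dict are answered with TApplicationException.UNKNOWN_METHOD in
+ # process() further down.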
self._processMap["create_catalog"] = Processor.process_create_catalog + self._processMap["alter_catalog"] = Processor.process_alter_catalog + self._processMap["get_catalog"] = Processor.process_get_catalog + self._processMap["get_catalogs"] = Processor.process_get_catalogs + self._processMap["drop_catalog"] = Processor.process_drop_catalog + self._processMap["create_database"] = Processor.process_create_database + self._processMap["get_database"] = Processor.process_get_database + self._processMap["get_database_req"] = Processor.process_get_database_req + self._processMap["drop_database"] = Processor.process_drop_database + self._processMap["drop_database_req"] = Processor.process_drop_database_req + self._processMap["get_databases"] = Processor.process_get_databases + self._processMap["get_all_databases"] = Processor.process_get_all_databases + self._processMap["alter_database"] = Processor.process_alter_database + self._processMap["create_dataconnector"] = Processor.process_create_dataconnector + self._processMap["get_dataconnector_req"] = Processor.process_get_dataconnector_req + self._processMap["drop_dataconnector"] = Processor.process_drop_dataconnector + self._processMap["get_dataconnectors"] = Processor.process_get_dataconnectors + self._processMap["alter_dataconnector"] = Processor.process_alter_dataconnector + self._processMap["get_type"] = Processor.process_get_type + self._processMap["create_type"] = Processor.process_create_type + self._processMap["drop_type"] = Processor.process_drop_type + self._processMap["get_type_all"] = Processor.process_get_type_all + self._processMap["get_fields"] = Processor.process_get_fields + self._processMap["get_fields_with_environment_context"] = Processor.process_get_fields_with_environment_context + self._processMap["get_fields_req"] = Processor.process_get_fields_req + self._processMap["get_schema"] = Processor.process_get_schema + self._processMap["get_schema_with_environment_context"] = Processor.process_get_schema_with_environment_context + self._processMap["get_schema_req"] = Processor.process_get_schema_req + self._processMap["create_table"] = Processor.process_create_table + self._processMap["create_table_with_environment_context"] = Processor.process_create_table_with_environment_context + self._processMap["create_table_with_constraints"] = Processor.process_create_table_with_constraints + self._processMap["create_table_req"] = Processor.process_create_table_req + self._processMap["drop_constraint"] = Processor.process_drop_constraint + self._processMap["add_primary_key"] = Processor.process_add_primary_key + self._processMap["add_foreign_key"] = Processor.process_add_foreign_key + self._processMap["add_unique_constraint"] = Processor.process_add_unique_constraint + self._processMap["add_not_null_constraint"] = Processor.process_add_not_null_constraint + self._processMap["add_default_constraint"] = Processor.process_add_default_constraint + self._processMap["add_check_constraint"] = Processor.process_add_check_constraint + self._processMap["translate_table_dryrun"] = Processor.process_translate_table_dryrun + self._processMap["drop_table"] = Processor.process_drop_table + self._processMap["drop_table_with_environment_context"] = Processor.process_drop_table_with_environment_context + self._processMap["truncate_table"] = Processor.process_truncate_table + self._processMap["truncate_table_req"] = Processor.process_truncate_table_req + self._processMap["get_tables"] = Processor.process_get_tables + self._processMap["get_tables_by_type"] = 
Processor.process_get_tables_by_type + self._processMap[ + "get_all_materialized_view_objects_for_rewriting" + ] = Processor.process_get_all_materialized_view_objects_for_rewriting + self._processMap["get_materialized_views_for_rewriting"] = Processor.process_get_materialized_views_for_rewriting + self._processMap["get_table_meta"] = Processor.process_get_table_meta + self._processMap["get_all_tables"] = Processor.process_get_all_tables + self._processMap["get_table"] = Processor.process_get_table + self._processMap["get_table_objects_by_name"] = Processor.process_get_table_objects_by_name + self._processMap["get_tables_ext"] = Processor.process_get_tables_ext + self._processMap["get_table_req"] = Processor.process_get_table_req + self._processMap["get_table_objects_by_name_req"] = Processor.process_get_table_objects_by_name_req + self._processMap["get_materialization_invalidation_info"] = Processor.process_get_materialization_invalidation_info + self._processMap["update_creation_metadata"] = Processor.process_update_creation_metadata + self._processMap["get_table_names_by_filter"] = Processor.process_get_table_names_by_filter + self._processMap["alter_table"] = Processor.process_alter_table + self._processMap["alter_table_with_environment_context"] = Processor.process_alter_table_with_environment_context + self._processMap["alter_table_with_cascade"] = Processor.process_alter_table_with_cascade + self._processMap["alter_table_req"] = Processor.process_alter_table_req + self._processMap["add_partition"] = Processor.process_add_partition + self._processMap["add_partition_with_environment_context"] = Processor.process_add_partition_with_environment_context + self._processMap["add_partitions"] = Processor.process_add_partitions + self._processMap["add_partitions_pspec"] = Processor.process_add_partitions_pspec + self._processMap["append_partition"] = Processor.process_append_partition + self._processMap["add_partitions_req"] = Processor.process_add_partitions_req + self._processMap[ + "append_partition_with_environment_context" + ] = Processor.process_append_partition_with_environment_context + self._processMap["append_partition_by_name"] = Processor.process_append_partition_by_name + self._processMap[ + "append_partition_by_name_with_environment_context" + ] = Processor.process_append_partition_by_name_with_environment_context + self._processMap["drop_partition"] = Processor.process_drop_partition + self._processMap["drop_partition_with_environment_context"] = Processor.process_drop_partition_with_environment_context + self._processMap["drop_partition_by_name"] = Processor.process_drop_partition_by_name + self._processMap[ + "drop_partition_by_name_with_environment_context" + ] = Processor.process_drop_partition_by_name_with_environment_context + self._processMap["drop_partitions_req"] = Processor.process_drop_partitions_req + self._processMap["get_partition"] = Processor.process_get_partition + self._processMap["get_partition_req"] = Processor.process_get_partition_req + self._processMap["exchange_partition"] = Processor.process_exchange_partition + self._processMap["exchange_partitions"] = Processor.process_exchange_partitions + self._processMap["get_partition_with_auth"] = Processor.process_get_partition_with_auth + self._processMap["get_partition_by_name"] = Processor.process_get_partition_by_name + self._processMap["get_partitions"] = Processor.process_get_partitions + self._processMap["get_partitions_req"] = Processor.process_get_partitions_req + 
self._processMap["get_partitions_with_auth"] = Processor.process_get_partitions_with_auth + self._processMap["get_partitions_pspec"] = Processor.process_get_partitions_pspec + self._processMap["get_partition_names"] = Processor.process_get_partition_names + self._processMap["get_partition_values"] = Processor.process_get_partition_values + self._processMap["get_partitions_ps"] = Processor.process_get_partitions_ps + self._processMap["get_partitions_ps_with_auth"] = Processor.process_get_partitions_ps_with_auth + self._processMap["get_partitions_ps_with_auth_req"] = Processor.process_get_partitions_ps_with_auth_req + self._processMap["get_partition_names_ps"] = Processor.process_get_partition_names_ps + self._processMap["get_partition_names_ps_req"] = Processor.process_get_partition_names_ps_req + self._processMap["get_partition_names_req"] = Processor.process_get_partition_names_req + self._processMap["get_partitions_by_filter"] = Processor.process_get_partitions_by_filter + self._processMap["get_part_specs_by_filter"] = Processor.process_get_part_specs_by_filter + self._processMap["get_partitions_by_expr"] = Processor.process_get_partitions_by_expr + self._processMap["get_partitions_spec_by_expr"] = Processor.process_get_partitions_spec_by_expr + self._processMap["get_num_partitions_by_filter"] = Processor.process_get_num_partitions_by_filter + self._processMap["get_partitions_by_names"] = Processor.process_get_partitions_by_names + self._processMap["get_partitions_by_names_req"] = Processor.process_get_partitions_by_names_req + self._processMap["alter_partition"] = Processor.process_alter_partition + self._processMap["alter_partitions"] = Processor.process_alter_partitions + self._processMap[ + "alter_partitions_with_environment_context" + ] = Processor.process_alter_partitions_with_environment_context + self._processMap["alter_partitions_req"] = Processor.process_alter_partitions_req + self._processMap["alter_partition_with_environment_context"] = Processor.process_alter_partition_with_environment_context + self._processMap["rename_partition"] = Processor.process_rename_partition + self._processMap["rename_partition_req"] = Processor.process_rename_partition_req + self._processMap["partition_name_has_valid_characters"] = Processor.process_partition_name_has_valid_characters + self._processMap["get_config_value"] = Processor.process_get_config_value + self._processMap["partition_name_to_vals"] = Processor.process_partition_name_to_vals + self._processMap["partition_name_to_spec"] = Processor.process_partition_name_to_spec + self._processMap["markPartitionForEvent"] = Processor.process_markPartitionForEvent + self._processMap["isPartitionMarkedForEvent"] = Processor.process_isPartitionMarkedForEvent + self._processMap["get_primary_keys"] = Processor.process_get_primary_keys + self._processMap["get_foreign_keys"] = Processor.process_get_foreign_keys + self._processMap["get_unique_constraints"] = Processor.process_get_unique_constraints + self._processMap["get_not_null_constraints"] = Processor.process_get_not_null_constraints + self._processMap["get_default_constraints"] = Processor.process_get_default_constraints + self._processMap["get_check_constraints"] = Processor.process_get_check_constraints + self._processMap["get_all_table_constraints"] = Processor.process_get_all_table_constraints + self._processMap["update_table_column_statistics"] = Processor.process_update_table_column_statistics + self._processMap["update_partition_column_statistics"] = 
Processor.process_update_partition_column_statistics + self._processMap["update_table_column_statistics_req"] = Processor.process_update_table_column_statistics_req + self._processMap["update_partition_column_statistics_req"] = Processor.process_update_partition_column_statistics_req + self._processMap["update_transaction_statistics"] = Processor.process_update_transaction_statistics + self._processMap["get_table_column_statistics"] = Processor.process_get_table_column_statistics + self._processMap["get_partition_column_statistics"] = Processor.process_get_partition_column_statistics + self._processMap["get_table_statistics_req"] = Processor.process_get_table_statistics_req + self._processMap["get_partitions_statistics_req"] = Processor.process_get_partitions_statistics_req + self._processMap["get_aggr_stats_for"] = Processor.process_get_aggr_stats_for + self._processMap["set_aggr_stats_for"] = Processor.process_set_aggr_stats_for + self._processMap["delete_partition_column_statistics"] = Processor.process_delete_partition_column_statistics + self._processMap["delete_table_column_statistics"] = Processor.process_delete_table_column_statistics + self._processMap["create_function"] = Processor.process_create_function + self._processMap["drop_function"] = Processor.process_drop_function + self._processMap["alter_function"] = Processor.process_alter_function + self._processMap["get_functions"] = Processor.process_get_functions + self._processMap["get_function"] = Processor.process_get_function + self._processMap["get_all_functions"] = Processor.process_get_all_functions + self._processMap["create_role"] = Processor.process_create_role + self._processMap["drop_role"] = Processor.process_drop_role + self._processMap["get_role_names"] = Processor.process_get_role_names + self._processMap["grant_role"] = Processor.process_grant_role + self._processMap["revoke_role"] = Processor.process_revoke_role + self._processMap["list_roles"] = Processor.process_list_roles + self._processMap["grant_revoke_role"] = Processor.process_grant_revoke_role + self._processMap["get_principals_in_role"] = Processor.process_get_principals_in_role + self._processMap["get_role_grants_for_principal"] = Processor.process_get_role_grants_for_principal + self._processMap["get_privilege_set"] = Processor.process_get_privilege_set + self._processMap["list_privileges"] = Processor.process_list_privileges + self._processMap["grant_privileges"] = Processor.process_grant_privileges + self._processMap["revoke_privileges"] = Processor.process_revoke_privileges + self._processMap["grant_revoke_privileges"] = Processor.process_grant_revoke_privileges + self._processMap["refresh_privileges"] = Processor.process_refresh_privileges + self._processMap["set_ugi"] = Processor.process_set_ugi + self._processMap["get_delegation_token"] = Processor.process_get_delegation_token + self._processMap["renew_delegation_token"] = Processor.process_renew_delegation_token + self._processMap["cancel_delegation_token"] = Processor.process_cancel_delegation_token + self._processMap["add_token"] = Processor.process_add_token + self._processMap["remove_token"] = Processor.process_remove_token + self._processMap["get_token"] = Processor.process_get_token + self._processMap["get_all_token_identifiers"] = Processor.process_get_all_token_identifiers + self._processMap["add_master_key"] = Processor.process_add_master_key + self._processMap["update_master_key"] = Processor.process_update_master_key + self._processMap["remove_master_key"] = 
Processor.process_remove_master_key + self._processMap["get_master_keys"] = Processor.process_get_master_keys + self._processMap["get_open_txns"] = Processor.process_get_open_txns + self._processMap["get_open_txns_info"] = Processor.process_get_open_txns_info + self._processMap["open_txns"] = Processor.process_open_txns + self._processMap["abort_txn"] = Processor.process_abort_txn + self._processMap["abort_txns"] = Processor.process_abort_txns + self._processMap["commit_txn"] = Processor.process_commit_txn + self._processMap["get_latest_txnid_in_conflict"] = Processor.process_get_latest_txnid_in_conflict + self._processMap["repl_tbl_writeid_state"] = Processor.process_repl_tbl_writeid_state + self._processMap["get_valid_write_ids"] = Processor.process_get_valid_write_ids + self._processMap["allocate_table_write_ids"] = Processor.process_allocate_table_write_ids + self._processMap["get_max_allocated_table_write_id"] = Processor.process_get_max_allocated_table_write_id + self._processMap["seed_write_id"] = Processor.process_seed_write_id + self._processMap["seed_txn_id"] = Processor.process_seed_txn_id + self._processMap["lock"] = Processor.process_lock + self._processMap["check_lock"] = Processor.process_check_lock + self._processMap["unlock"] = Processor.process_unlock + self._processMap["show_locks"] = Processor.process_show_locks + self._processMap["heartbeat"] = Processor.process_heartbeat + self._processMap["heartbeat_txn_range"] = Processor.process_heartbeat_txn_range + self._processMap["compact"] = Processor.process_compact + self._processMap["compact2"] = Processor.process_compact2 + self._processMap["show_compact"] = Processor.process_show_compact + self._processMap["add_dynamic_partitions"] = Processor.process_add_dynamic_partitions + self._processMap["find_next_compact"] = Processor.process_find_next_compact + self._processMap["find_next_compact2"] = Processor.process_find_next_compact2 + self._processMap["update_compactor_state"] = Processor.process_update_compactor_state + self._processMap["find_columns_with_stats"] = Processor.process_find_columns_with_stats + self._processMap["mark_cleaned"] = Processor.process_mark_cleaned + self._processMap["mark_compacted"] = Processor.process_mark_compacted + self._processMap["mark_failed"] = Processor.process_mark_failed + self._processMap["mark_refused"] = Processor.process_mark_refused + self._processMap["update_compaction_metrics_data"] = Processor.process_update_compaction_metrics_data + self._processMap["remove_compaction_metrics_data"] = Processor.process_remove_compaction_metrics_data + self._processMap["set_hadoop_jobid"] = Processor.process_set_hadoop_jobid + self._processMap["get_latest_committed_compaction_info"] = Processor.process_get_latest_committed_compaction_info + self._processMap["get_next_notification"] = Processor.process_get_next_notification + self._processMap["get_current_notificationEventId"] = Processor.process_get_current_notificationEventId + self._processMap["get_notification_events_count"] = Processor.process_get_notification_events_count + self._processMap["fire_listener_event"] = Processor.process_fire_listener_event + self._processMap["flushCache"] = Processor.process_flushCache + self._processMap["add_write_notification_log"] = Processor.process_add_write_notification_log + self._processMap["add_write_notification_log_in_batch"] = Processor.process_add_write_notification_log_in_batch + self._processMap["cm_recycle"] = Processor.process_cm_recycle + self._processMap["get_file_metadata_by_expr"] = 
Processor.process_get_file_metadata_by_expr + self._processMap["get_file_metadata"] = Processor.process_get_file_metadata + self._processMap["put_file_metadata"] = Processor.process_put_file_metadata + self._processMap["clear_file_metadata"] = Processor.process_clear_file_metadata + self._processMap["cache_file_metadata"] = Processor.process_cache_file_metadata + self._processMap["get_metastore_db_uuid"] = Processor.process_get_metastore_db_uuid + self._processMap["create_resource_plan"] = Processor.process_create_resource_plan + self._processMap["get_resource_plan"] = Processor.process_get_resource_plan + self._processMap["get_active_resource_plan"] = Processor.process_get_active_resource_plan + self._processMap["get_all_resource_plans"] = Processor.process_get_all_resource_plans + self._processMap["alter_resource_plan"] = Processor.process_alter_resource_plan + self._processMap["validate_resource_plan"] = Processor.process_validate_resource_plan + self._processMap["drop_resource_plan"] = Processor.process_drop_resource_plan + self._processMap["create_wm_trigger"] = Processor.process_create_wm_trigger + self._processMap["alter_wm_trigger"] = Processor.process_alter_wm_trigger + self._processMap["drop_wm_trigger"] = Processor.process_drop_wm_trigger + self._processMap["get_triggers_for_resourceplan"] = Processor.process_get_triggers_for_resourceplan + self._processMap["create_wm_pool"] = Processor.process_create_wm_pool + self._processMap["alter_wm_pool"] = Processor.process_alter_wm_pool + self._processMap["drop_wm_pool"] = Processor.process_drop_wm_pool + self._processMap["create_or_update_wm_mapping"] = Processor.process_create_or_update_wm_mapping + self._processMap["drop_wm_mapping"] = Processor.process_drop_wm_mapping + self._processMap[ + "create_or_drop_wm_trigger_to_pool_mapping" + ] = Processor.process_create_or_drop_wm_trigger_to_pool_mapping + self._processMap["create_ischema"] = Processor.process_create_ischema + self._processMap["alter_ischema"] = Processor.process_alter_ischema + self._processMap["get_ischema"] = Processor.process_get_ischema + self._processMap["drop_ischema"] = Processor.process_drop_ischema + self._processMap["add_schema_version"] = Processor.process_add_schema_version + self._processMap["get_schema_version"] = Processor.process_get_schema_version + self._processMap["get_schema_latest_version"] = Processor.process_get_schema_latest_version + self._processMap["get_schema_all_versions"] = Processor.process_get_schema_all_versions + self._processMap["drop_schema_version"] = Processor.process_drop_schema_version + self._processMap["get_schemas_by_cols"] = Processor.process_get_schemas_by_cols + self._processMap["map_schema_version_to_serde"] = Processor.process_map_schema_version_to_serde + self._processMap["set_schema_version_state"] = Processor.process_set_schema_version_state + self._processMap["add_serde"] = Processor.process_add_serde + self._processMap["get_serde"] = Processor.process_get_serde + self._processMap["get_lock_materialization_rebuild"] = Processor.process_get_lock_materialization_rebuild + self._processMap["heartbeat_lock_materialization_rebuild"] = Processor.process_heartbeat_lock_materialization_rebuild + self._processMap["add_runtime_stats"] = Processor.process_add_runtime_stats + self._processMap["get_runtime_stats"] = Processor.process_get_runtime_stats + self._processMap["get_partitions_with_specs"] = Processor.process_get_partitions_with_specs + self._processMap["scheduled_query_poll"] = Processor.process_scheduled_query_poll + 
self._processMap["scheduled_query_maintenance"] = Processor.process_scheduled_query_maintenance + self._processMap["scheduled_query_progress"] = Processor.process_scheduled_query_progress + self._processMap["get_scheduled_query"] = Processor.process_get_scheduled_query + self._processMap["add_replication_metrics"] = Processor.process_add_replication_metrics + self._processMap["get_replication_metrics"] = Processor.process_get_replication_metrics + self._processMap["get_open_txns_req"] = Processor.process_get_open_txns_req + self._processMap["create_stored_procedure"] = Processor.process_create_stored_procedure + self._processMap["get_stored_procedure"] = Processor.process_get_stored_procedure + self._processMap["drop_stored_procedure"] = Processor.process_drop_stored_procedure + self._processMap["get_all_stored_procedures"] = Processor.process_get_all_stored_procedures + self._processMap["find_package"] = Processor.process_find_package + self._processMap["add_package"] = Processor.process_add_package + self._processMap["get_all_packages"] = Processor.process_get_all_packages + self._processMap["drop_package"] = Processor.process_drop_package + self._processMap["get_all_write_event_info"] = Processor.process_get_all_write_event_info + self._on_message_begin = None + + def on_message_begin(self, func): + self._on_message_begin = func + + def process(self, iprot, oprot): + (name, type, seqid) = iprot.readMessageBegin() + if self._on_message_begin: + self._on_message_begin(name, type, seqid) + if name not in self._processMap: + iprot.skip(TType.STRUCT) + iprot.readMessageEnd() + x = TApplicationException(TApplicationException.UNKNOWN_METHOD, "Unknown function %s" % (name)) + oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid) + x.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + return + else: + self._processMap[name](self, seqid, iprot, oprot) + return True + + def process_getMetaConf(self, seqid, iprot, oprot): + args = getMetaConf_args() + args.read(iprot) + iprot.readMessageEnd() + result = getMetaConf_result() + try: + result.success = self._handler.getMetaConf(args.key) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("getMetaConf", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_setMetaConf(self, seqid, iprot, oprot): + args = setMetaConf_args() + args.read(iprot) + iprot.readMessageEnd() + result = setMetaConf_result() + try: + self._handler.setMetaConf(args.key, args.value) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("setMetaConf", msg_type, seqid) + 
result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_catalog(self, seqid, iprot, oprot): + args = create_catalog_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_catalog_result() + try: + self._handler.create_catalog(args.catalog) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("create_catalog", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_catalog(self, seqid, iprot, oprot): + args = alter_catalog_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_catalog_result() + try: + self._handler.alter_catalog(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_catalog", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_catalog(self, seqid, iprot, oprot): + args = get_catalog_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_catalog_result() + try: + result.success = self._handler.get_catalog(args.catName) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_catalog", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_catalogs(self, seqid, iprot, oprot): + args = get_catalogs_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_catalogs_result() + try: + result.success = self._handler.get_catalogs() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + 
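+ # NOTE: every process_* handler maps outcomes the same way: transport
+ # errors propagate so the serving loop can tear down the connection,
+ # declared Thrift exceptions become ordinary REPLY results, and any
+ # unexpected error is logged and masked as a generic INTERNAL_ERROR
+ # TApplicationException instead of leaking the Python traceback.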
msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_catalogs", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_catalog(self, seqid, iprot, oprot): + args = drop_catalog_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_catalog_result() + try: + self._handler.drop_catalog(args.catName) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_catalog", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_database(self, seqid, iprot, oprot): + args = create_database_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_database_result() + try: + self._handler.create_database(args.database) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("create_database", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_database(self, seqid, iprot, oprot): + args = get_database_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_database_result() + try: + result.success = self._handler.get_database(args.name) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_database", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_database_req(self, seqid, iprot, oprot): + args = get_database_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = 
get_database_req_result() + try: + result.success = self._handler.get_database_req(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_database_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_database(self, seqid, iprot, oprot): + args = drop_database_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_database_result() + try: + self._handler.drop_database(args.name, args.deleteData, args.cascade) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_database", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_database_req(self, seqid, iprot, oprot): + args = drop_database_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_database_req_result() + try: + self._handler.drop_database_req(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_database_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_databases(self, seqid, iprot, oprot): + args = get_databases_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_databases_result() + try: + result.success = self._handler.get_databases(args.pattern) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = 
TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_databases", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_all_databases(self, seqid, iprot, oprot): + args = get_all_databases_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_all_databases_result() + try: + result.success = self._handler.get_all_databases() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_all_databases", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_database(self, seqid, iprot, oprot): + args = alter_database_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_database_result() + try: + self._handler.alter_database(args.dbname, args.db) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_database", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_dataconnector(self, seqid, iprot, oprot): + args = create_dataconnector_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_dataconnector_result() + try: + self._handler.create_dataconnector(args.connector) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("create_dataconnector", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_dataconnector_req(self, seqid, iprot, oprot): + args = get_dataconnector_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_dataconnector_req_result() + try: + result.success = self._handler.get_dataconnector_req(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + 
result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_dataconnector_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_dataconnector(self, seqid, iprot, oprot): + args = drop_dataconnector_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_dataconnector_result() + try: + self._handler.drop_dataconnector(args.name, args.ifNotExists, args.checkReferences) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_dataconnector", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_dataconnectors(self, seqid, iprot, oprot): + args = get_dataconnectors_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_dataconnectors_result() + try: + result.success = self._handler.get_dataconnectors() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_dataconnectors", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_dataconnector(self, seqid, iprot, oprot): + args = alter_dataconnector_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_dataconnector_result() + try: + self._handler.alter_dataconnector(args.name, args.connector) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_dataconnector", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def 
process_get_type(self, seqid, iprot, oprot):
+        args = get_type_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_type_result()
+        try:
+            result.success = self._handler.get_type(args.name)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except NoSuchObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_type", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_create_type(self, seqid, iprot, oprot):
+        args = create_type_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = create_type_result()
+        try:
+            result.success = self._handler.create_type(args.type)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except AlreadyExistsException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except InvalidObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("create_type", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_drop_type(self, seqid, iprot, oprot):
+        args = drop_type_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = drop_type_result()
+        try:
+            result.success = self._handler.drop_type(args.type)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except NoSuchObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("drop_type", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_type_all(self, seqid, iprot, oprot):
+        args = get_type_all_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_type_all_result()
+        try:
+            result.success = self._handler.get_type_all(args.name)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_type_all", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_fields(self, seqid, iprot, oprot):
+        args = get_fields_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_fields_result()
+        try:
+            result.success = self._handler.get_fields(args.db_name, args.table_name)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except UnknownTableException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except UnknownDBException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_fields", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_fields_with_environment_context(self, seqid, iprot, oprot):
+        args = get_fields_with_environment_context_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_fields_with_environment_context_result()
+        try:
+            result.success = self._handler.get_fields_with_environment_context(
+                args.db_name, args.table_name, args.environment_context
+            )
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except UnknownTableException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except UnknownDBException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_fields_with_environment_context", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_fields_req(self, seqid, iprot, oprot):
+        args = get_fields_req_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_fields_req_result()
+        try:
+            result.success = self._handler.get_fields_req(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except UnknownTableException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except UnknownDBException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_fields_req", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_schema(self, seqid, iprot, oprot):
+        args = get_schema_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_schema_result()
+        try:
+            result.success = self._handler.get_schema(args.db_name, args.table_name)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except UnknownTableException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except UnknownDBException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_schema", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_schema_with_environment_context(self, seqid, iprot, oprot):
+        args = get_schema_with_environment_context_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_schema_with_environment_context_result()
+        try:
+            result.success = self._handler.get_schema_with_environment_context(
+                args.db_name, args.table_name, args.environment_context
+            )
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except UnknownTableException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except UnknownDBException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_schema_with_environment_context", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_schema_req(self, seqid, iprot, oprot):
+        args = get_schema_req_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_schema_req_result()
+        try:
+            result.success = self._handler.get_schema_req(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except UnknownTableException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except UnknownDBException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_schema_req", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_create_table(self, seqid, iprot, oprot):
+        args = create_table_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = create_table_result()
+        try:
+            self._handler.create_table(args.tbl)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except AlreadyExistsException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except InvalidObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except NoSuchObjectException as o4:
+            msg_type = TMessageType.REPLY
+            result.o4 = o4
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("create_table", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_create_table_with_environment_context(self, seqid, iprot, oprot):
+        args = create_table_with_environment_context_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = create_table_with_environment_context_result()
+        try:
+            self._handler.create_table_with_environment_context(args.tbl, args.environment_context)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except AlreadyExistsException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except InvalidObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except NoSuchObjectException as o4:
+            msg_type = TMessageType.REPLY
+            result.o4 = o4
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("create_table_with_environment_context", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_create_table_with_constraints(self, seqid, iprot, oprot):
+        args = create_table_with_constraints_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = create_table_with_constraints_result()
+        try:
+            self._handler.create_table_with_constraints(
+                args.tbl,
+                args.primaryKeys,
+                args.foreignKeys,
+                args.uniqueConstraints,
+                args.notNullConstraints,
+                args.defaultConstraints,
+                args.checkConstraints,
+            )
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except AlreadyExistsException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except InvalidObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except NoSuchObjectException as o4:
+            msg_type = TMessageType.REPLY
+            result.o4 = o4
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("create_table_with_constraints", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_create_table_req(self, seqid, iprot, oprot):
+        args = create_table_req_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = create_table_req_result()
+        try:
+            self._handler.create_table_req(args.request)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except AlreadyExistsException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except InvalidObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except NoSuchObjectException as o4:
+            msg_type = TMessageType.REPLY
+            result.o4 = o4
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("create_table_req", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_drop_constraint(self, seqid, iprot, oprot):
+        args = drop_constraint_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = drop_constraint_result()
+        try:
+            self._handler.drop_constraint(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("drop_constraint", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_add_primary_key(self, seqid, iprot, oprot):
+        args = add_primary_key_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = add_primary_key_result()
+        try:
+            self._handler.add_primary_key(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("add_primary_key", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_add_foreign_key(self, seqid, iprot, oprot):
+        args = add_foreign_key_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = add_foreign_key_result()
+        try:
+            self._handler.add_foreign_key(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("add_foreign_key", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_add_unique_constraint(self, seqid, iprot, oprot):
+        args = add_unique_constraint_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = add_unique_constraint_result()
+        try:
+            self._handler.add_unique_constraint(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("add_unique_constraint", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_add_not_null_constraint(self, seqid, iprot, oprot):
+        args = add_not_null_constraint_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = add_not_null_constraint_result()
+        try:
+            self._handler.add_not_null_constraint(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("add_not_null_constraint", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_add_default_constraint(self, seqid, iprot, oprot):
+        args = add_default_constraint_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = add_default_constraint_result()
+        try:
+            self._handler.add_default_constraint(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("add_default_constraint", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_add_check_constraint(self, seqid, iprot, oprot):
+        args = add_check_constraint_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = add_check_constraint_result()
+        try:
+            self._handler.add_check_constraint(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("add_check_constraint", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_translate_table_dryrun(self, seqid, iprot, oprot):
+        args = translate_table_dryrun_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = translate_table_dryrun_result()
+        try:
+            result.success = self._handler.translate_table_dryrun(args.request)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except AlreadyExistsException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except InvalidObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except NoSuchObjectException as o4:
+            msg_type = TMessageType.REPLY
+            result.o4 = o4
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("translate_table_dryrun", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_drop_table(self, seqid, iprot, oprot):
+        args = drop_table_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = drop_table_result()
+        try:
+            self._handler.drop_table(args.dbname, args.name, args.deleteData)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("drop_table", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_drop_table_with_environment_context(self, seqid, iprot, oprot):
+        args = drop_table_with_environment_context_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = drop_table_with_environment_context_result()
+        try:
+            self._handler.drop_table_with_environment_context(args.dbname, args.name, args.deleteData, args.environment_context)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("drop_table_with_environment_context", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_truncate_table(self, seqid, iprot, oprot):
+        args = truncate_table_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = truncate_table_result()
+        try:
+            self._handler.truncate_table(args.dbName, args.tableName, args.partNames)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("truncate_table", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_truncate_table_req(self, seqid, iprot, oprot):
+        args = truncate_table_req_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = truncate_table_req_result()
+        try:
+            result.success = self._handler.truncate_table_req(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("truncate_table_req", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_tables(self, seqid, iprot, oprot):
+        args = get_tables_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_tables_result()
+        try:
+            result.success = self._handler.get_tables(args.db_name, args.pattern)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_tables", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_tables_by_type(self, seqid, iprot, oprot):
+        args = get_tables_by_type_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_tables_by_type_result()
+        try:
+            result.success = self._handler.get_tables_by_type(args.db_name, args.pattern, args.tableType)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_tables_by_type", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_all_materialized_view_objects_for_rewriting(self, seqid, iprot, oprot):
+        args = get_all_materialized_view_objects_for_rewriting_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_all_materialized_view_objects_for_rewriting_result()
+        try:
+            result.success = self._handler.get_all_materialized_view_objects_for_rewriting()
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_all_materialized_view_objects_for_rewriting", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_materialized_views_for_rewriting(self, seqid, iprot, oprot):
+        args = get_materialized_views_for_rewriting_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_materialized_views_for_rewriting_result()
+        try:
+            result.success = self._handler.get_materialized_views_for_rewriting(args.db_name)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_materialized_views_for_rewriting", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_table_meta(self, seqid, iprot, oprot):
+        args = get_table_meta_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_table_meta_result()
+        try:
+            result.success = self._handler.get_table_meta(args.db_patterns, args.tbl_patterns, args.tbl_types)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_table_meta", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_all_tables(self, seqid, iprot, oprot):
+        args = get_all_tables_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_all_tables_result()
+        try:
+            result.success = self._handler.get_all_tables(args.db_name)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_all_tables", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_table(self, seqid, iprot, oprot):
+        args = get_table_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_table_result()
+        try:
+            result.success = self._handler.get_table(args.dbname, args.tbl_name)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except NoSuchObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_table", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_table_objects_by_name(self, seqid, iprot, oprot):
+        args = get_table_objects_by_name_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_table_objects_by_name_result()
+        try:
+            result.success = self._handler.get_table_objects_by_name(args.dbname, args.tbl_names)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_table_objects_by_name", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_tables_ext(self, seqid, iprot, oprot):
+        args = get_tables_ext_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_tables_ext_result()
+        try:
+            result.success = self._handler.get_tables_ext(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_tables_ext", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_table_req(self, seqid, iprot, oprot):
+        args = get_table_req_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_table_req_result()
+        try:
+            result.success = self._handler.get_table_req(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except NoSuchObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_table_req", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_table_objects_by_name_req(self, seqid, iprot, oprot):
+        args = get_table_objects_by_name_req_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_table_objects_by_name_req_result()
+        try:
+            result.success = self._handler.get_table_objects_by_name_req(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except InvalidOperationException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except UnknownDBException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_table_objects_by_name_req", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_materialization_invalidation_info(self, seqid, iprot, oprot):
+        args = get_materialization_invalidation_info_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_materialization_invalidation_info_result()
+        try:
+            result.success = self._handler.get_materialization_invalidation_info(args.creation_metadata, args.validTxnList)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except InvalidOperationException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except UnknownDBException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_materialization_invalidation_info", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_update_creation_metadata(self, seqid, iprot, oprot):
+        args = update_creation_metadata_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = update_creation_metadata_result()
+        try:
+            self._handler.update_creation_metadata(args.catName, args.dbname, args.tbl_name, args.creation_metadata)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except InvalidOperationException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except UnknownDBException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("update_creation_metadata", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_table_names_by_filter(self, seqid, iprot, oprot):
+        args = get_table_names_by_filter_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_table_names_by_filter_result()
+        try:
+            result.success = self._handler.get_table_names_by_filter(args.dbname, args.filter, args.max_tables)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except InvalidOperationException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except UnknownDBException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_table_names_by_filter", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_alter_table(self, seqid, iprot, oprot):
+        args = alter_table_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = alter_table_result()
+        try:
+            self._handler.alter_table(args.dbname, args.tbl_name, args.new_tbl)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidOperationException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("alter_table", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_alter_table_with_environment_context(self, seqid, iprot, oprot):
+        args = alter_table_with_environment_context_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = alter_table_with_environment_context_result()
+        try:
+            self._handler.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidOperationException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("alter_table_with_environment_context", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_alter_table_with_cascade(self, seqid, iprot, oprot):
+        args = alter_table_with_cascade_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = alter_table_with_cascade_result()
+        try:
+            self._handler.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidOperationException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("alter_table_with_cascade", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_alter_table_req(self, seqid, iprot, oprot):
+        args = alter_table_req_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = alter_table_req_result()
+        try:
+            result.success = self._handler.alter_table_req(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidOperationException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("alter_table_req", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_add_partition(self, seqid, iprot, oprot):
+        args = add_partition_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = add_partition_result()
+        try:
+            result.success = self._handler.add_partition(args.new_part)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except AlreadyExistsException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("add_partition", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_add_partition_with_environment_context(self, seqid, iprot, oprot):
+        args = add_partition_with_environment_context_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = add_partition_with_environment_context_result()
+        try:
+            result.success = self._handler.add_partition_with_environment_context(args.new_part, args.environment_context)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except AlreadyExistsException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("add_partition_with_environment_context", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_add_partitions(self, seqid, iprot, oprot):
+        args = add_partitions_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = add_partitions_result()
+        try:
+            result.success = self._handler.add_partitions(args.new_parts)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except AlreadyExistsException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("add_partitions", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_add_partitions_pspec(self, seqid, iprot, oprot):
+        args = add_partitions_pspec_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = add_partitions_pspec_result()
+        try:
+            result.success = self._handler.add_partitions_pspec(args.new_parts)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except AlreadyExistsException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("add_partitions_pspec", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_append_partition(self, seqid, iprot, oprot):
+        args = append_partition_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = append_partition_result()
+        try:
+            result.success = self._handler.append_partition(args.db_name, args.tbl_name, args.part_vals)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except AlreadyExistsException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("append_partition", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_add_partitions_req(self, seqid, iprot, oprot):
+        args = add_partitions_req_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = add_partitions_req_result()
+        try:
+            result.success = self._handler.add_partitions_req(args.request)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except AlreadyExistsException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("add_partitions_req", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_append_partition_with_environment_context(self, seqid, iprot, oprot):
+        args = append_partition_with_environment_context_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = append_partition_with_environment_context_result()
+        try:
+            result.success = self._handler.append_partition_with_environment_context(
+                args.db_name, args.tbl_name, args.part_vals, args.environment_context
+            )
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except AlreadyExistsException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("append_partition_with_environment_context", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_append_partition_by_name(self, seqid, iprot, oprot):
+        args = append_partition_by_name_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = append_partition_by_name_result()
+        try:
+            result.success = self._handler.append_partition_by_name(args.db_name, args.tbl_name, args.part_name)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except AlreadyExistsException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("append_partition_by_name", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_append_partition_by_name_with_environment_context(self, seqid, iprot, oprot):
+        args = append_partition_by_name_with_environment_context_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = append_partition_by_name_with_environment_context_result()
+        try:
+            result.success = self._handler.append_partition_by_name_with_environment_context(
+                args.db_name, args.tbl_name, args.part_name, args.environment_context
+            )
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except InvalidObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except AlreadyExistsException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except MetaException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("append_partition_by_name_with_environment_context", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_drop_partition(self, seqid, iprot, oprot):
+        args = drop_partition_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = drop_partition_result()
+        try:
+            result.success = self._handler.drop_partition(args.db_name, args.tbl_name, args.part_vals, args.deleteData)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("drop_partition", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_drop_partition_with_environment_context(self, seqid, iprot, oprot):
+        args = drop_partition_with_environment_context_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = drop_partition_with_environment_context_result()
+        try:
+            result.success = self._handler.drop_partition_with_environment_context(
+                args.db_name, args.tbl_name, args.part_vals, args.deleteData, args.environment_context
+            )
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("drop_partition_with_environment_context", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_drop_partition_by_name(self, seqid, iprot, oprot):
+        args = drop_partition_by_name_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = drop_partition_by_name_result()
+        try:
+            result.success = self._handler.drop_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.deleteData)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("drop_partition_by_name", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_drop_partition_by_name_with_environment_context(self, seqid, iprot, oprot):
+        args = drop_partition_by_name_with_environment_context_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = drop_partition_by_name_with_environment_context_result()
+        try:
+            result.success = self._handler.drop_partition_by_name_with_environment_context(
+                args.db_name, args.tbl_name, args.part_name, args.deleteData, args.environment_context
+            )
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("drop_partition_by_name_with_environment_context", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_drop_partitions_req(self, seqid, iprot, oprot):
+        args = drop_partitions_req_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = drop_partitions_req_result()
+        try:
+            result.success = self._handler.drop_partitions_req(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("drop_partitions_req", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_partition(self, seqid, iprot, oprot):
+        args = get_partition_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_partition_result()
+        try:
+            result.success = self._handler.get_partition(args.db_name, args.tbl_name, args.part_vals)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except NoSuchObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_partition", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_partition_req(self, seqid, iprot, oprot):
+        args = get_partition_req_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_partition_req_result()
+        try:
+            result.success = self._handler.get_partition_req(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except NoSuchObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_partition_req", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_exchange_partition(self, seqid, iprot, oprot):
+        args = exchange_partition_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = exchange_partition_result()
+        try:
+            result.success = self._handler.exchange_partition(
+                args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name
+            )
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except NoSuchObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except InvalidObjectException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except InvalidInputException as o4:
+            msg_type = TMessageType.REPLY
+            result.o4 = o4
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("exchange_partition", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_exchange_partitions(self, seqid, iprot, oprot):
+        args = exchange_partitions_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = exchange_partitions_result()
+        try:
+            result.success = self._handler.exchange_partitions(
+                args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name
+            )
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except NoSuchObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except InvalidObjectException as o3:
+            msg_type = TMessageType.REPLY
+            result.o3 = o3
+        except InvalidInputException as o4:
+            msg_type = TMessageType.REPLY
+            result.o4 = o4
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("exchange_partitions", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_partition_with_auth(self, seqid, iprot, oprot):
+        args = get_partition_with_auth_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_partition_with_auth_result()
+        try:
+            result.success = self._handler.get_partition_with_auth(
+                args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names
+            )
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except NoSuchObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_partition_with_auth", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_partition_by_name(self, seqid, iprot, oprot):
+        args = get_partition_by_name_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_partition_by_name_result()
+        try:
+            result.success = self._handler.get_partition_by_name(args.db_name, args.tbl_name, args.part_name)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except MetaException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except NoSuchObjectException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_partition_by_name", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_partitions(self, seqid, iprot, oprot):
+        args = get_partitions_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_partitions_result()
+        try:
+            result.success = self._handler.get_partitions(args.db_name, args.tbl_name, args.max_parts)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_partitions", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_partitions_req(self, seqid, iprot, oprot):
+        args = get_partitions_req_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_partitions_req_result()
+        try:
+            result.success = self._handler.get_partitions_req(args.req)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_partitions_req", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_partitions_with_auth(self, seqid, iprot, oprot):
+        args = get_partitions_with_auth_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_partitions_with_auth_result()
+        try:
+            result.success = self._handler.get_partitions_with_auth(
+                args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names
+            )
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
+        oprot.writeMessageBegin("get_partitions_with_auth", msg_type, seqid)
+        result.write(oprot)
+        oprot.writeMessageEnd()
+        oprot.trans.flush()
+
+    def process_get_partitions_pspec(self, seqid, iprot, oprot):
+        args = get_partitions_pspec_args()
+        args.read(iprot)
+        iprot.readMessageEnd()
+        result = get_partitions_pspec_result()
+        try:
+            result.success = self._handler.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts)
+            msg_type = TMessageType.REPLY
+        except TTransport.TTransportException:
+            raise
+        except NoSuchObjectException as o1:
+            msg_type = TMessageType.REPLY
+            result.o1 = o1
+        except MetaException as o2:
+            msg_type = TMessageType.REPLY
+            result.o2 = o2
+        except TApplicationException as ex:
+            logging.exception("TApplication exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = ex
+        except Exception:
+            logging.exception("Unexpected exception in handler")
+            msg_type = TMessageType.EXCEPTION
+            result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
oprot.writeMessageBegin("get_partitions_pspec", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partition_names(self, seqid, iprot, oprot): + args = get_partition_names_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partition_names_result() + try: + result.success = self._handler.get_partition_names(args.db_name, args.tbl_name, args.max_parts) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partition_names", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partition_values(self, seqid, iprot, oprot): + args = get_partition_values_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partition_values_result() + try: + result.success = self._handler.get_partition_values(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partition_values", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partitions_ps(self, seqid, iprot, oprot): + args = get_partitions_ps_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partitions_ps_result() + try: + result.success = self._handler.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partitions_ps", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partitions_ps_with_auth(self, seqid, iprot, oprot): + args = get_partitions_ps_with_auth_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partitions_ps_with_auth_result() + try: + result.success = self._handler.get_partitions_ps_with_auth( + args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names + ) + msg_type 
= TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partitions_ps_with_auth", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partitions_ps_with_auth_req(self, seqid, iprot, oprot): + args = get_partitions_ps_with_auth_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partitions_ps_with_auth_req_result() + try: + result.success = self._handler.get_partitions_ps_with_auth_req(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partitions_ps_with_auth_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partition_names_ps(self, seqid, iprot, oprot): + args = get_partition_names_ps_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partition_names_ps_result() + try: + result.success = self._handler.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partition_names_ps", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partition_names_ps_req(self, seqid, iprot, oprot): + args = get_partition_names_ps_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partition_names_ps_req_result() + try: + result.success = self._handler.get_partition_names_ps_req(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in 
handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partition_names_ps_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partition_names_req(self, seqid, iprot, oprot): + args = get_partition_names_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partition_names_req_result() + try: + result.success = self._handler.get_partition_names_req(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partition_names_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partitions_by_filter(self, seqid, iprot, oprot): + args = get_partitions_by_filter_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partitions_by_filter_result() + try: + result.success = self._handler.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partitions_by_filter", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_part_specs_by_filter(self, seqid, iprot, oprot): + args = get_part_specs_by_filter_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_part_specs_by_filter_result() + try: + result.success = self._handler.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_part_specs_by_filter", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partitions_by_expr(self, seqid, iprot, oprot): + args = get_partitions_by_expr_args() + args.read(iprot) + iprot.readMessageEnd() + result = 
get_partitions_by_expr_result() + try: + result.success = self._handler.get_partitions_by_expr(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partitions_by_expr", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partitions_spec_by_expr(self, seqid, iprot, oprot): + args = get_partitions_spec_by_expr_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partitions_spec_by_expr_result() + try: + result.success = self._handler.get_partitions_spec_by_expr(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partitions_spec_by_expr", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_num_partitions_by_filter(self, seqid, iprot, oprot): + args = get_num_partitions_by_filter_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_num_partitions_by_filter_result() + try: + result.success = self._handler.get_num_partitions_by_filter(args.db_name, args.tbl_name, args.filter) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_num_partitions_by_filter", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partitions_by_names(self, seqid, iprot, oprot): + args = get_partitions_by_names_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partitions_by_names_result() + try: + result.success = self._handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in 
handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partitions_by_names", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partitions_by_names_req(self, seqid, iprot, oprot): + args = get_partitions_by_names_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partitions_by_names_req_result() + try: + result.success = self._handler.get_partitions_by_names_req(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partitions_by_names_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_partition(self, seqid, iprot, oprot): + args = alter_partition_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_partition_result() + try: + self._handler.alter_partition(args.db_name, args.tbl_name, args.new_part) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except InvalidOperationException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_partition", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_partitions(self, seqid, iprot, oprot): + args = alter_partitions_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_partitions_result() + try: + self._handler.alter_partitions(args.db_name, args.tbl_name, args.new_parts) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except InvalidOperationException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_partitions", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_partitions_with_environment_context(self, seqid, iprot, oprot): + args = alter_partitions_with_environment_context_args() + 
args.read(iprot) + iprot.readMessageEnd() + result = alter_partitions_with_environment_context_result() + try: + self._handler.alter_partitions_with_environment_context( + args.db_name, args.tbl_name, args.new_parts, args.environment_context + ) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except InvalidOperationException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_partitions_with_environment_context", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_partitions_req(self, seqid, iprot, oprot): + args = alter_partitions_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_partitions_req_result() + try: + result.success = self._handler.alter_partitions_req(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except InvalidOperationException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_partitions_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_partition_with_environment_context(self, seqid, iprot, oprot): + args = alter_partition_with_environment_context_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_partition_with_environment_context_result() + try: + self._handler.alter_partition_with_environment_context( + args.db_name, args.tbl_name, args.new_part, args.environment_context + ) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except InvalidOperationException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_partition_with_environment_context", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_rename_partition(self, seqid, iprot, oprot): + args = rename_partition_args() + args.read(iprot) + iprot.readMessageEnd() + result = rename_partition_result() + try: + self._handler.rename_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except InvalidOperationException as o1: + msg_type = 
TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("rename_partition", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_rename_partition_req(self, seqid, iprot, oprot): + args = rename_partition_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = rename_partition_req_result() + try: + result.success = self._handler.rename_partition_req(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except InvalidOperationException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("rename_partition_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_partition_name_has_valid_characters(self, seqid, iprot, oprot): + args = partition_name_has_valid_characters_args() + args.read(iprot) + iprot.readMessageEnd() + result = partition_name_has_valid_characters_result() + try: + result.success = self._handler.partition_name_has_valid_characters(args.part_vals, args.throw_exception) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("partition_name_has_valid_characters", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_config_value(self, seqid, iprot, oprot): + args = get_config_value_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_config_value_result() + try: + result.success = self._handler.get_config_value(args.name, args.defaultValue) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except ConfigValSecurityException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_config_value", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_partition_name_to_vals(self, seqid, 
iprot, oprot): + args = partition_name_to_vals_args() + args.read(iprot) + iprot.readMessageEnd() + result = partition_name_to_vals_result() + try: + result.success = self._handler.partition_name_to_vals(args.part_name) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("partition_name_to_vals", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_partition_name_to_spec(self, seqid, iprot, oprot): + args = partition_name_to_spec_args() + args.read(iprot) + iprot.readMessageEnd() + result = partition_name_to_spec_result() + try: + result.success = self._handler.partition_name_to_spec(args.part_name) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("partition_name_to_spec", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_markPartitionForEvent(self, seqid, iprot, oprot): + args = markPartitionForEvent_args() + args.read(iprot) + iprot.readMessageEnd() + result = markPartitionForEvent_result() + try: + self._handler.markPartitionForEvent(args.db_name, args.tbl_name, args.part_vals, args.eventType) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except UnknownDBException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except UnknownTableException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except UnknownPartitionException as o5: + msg_type = TMessageType.REPLY + result.o5 = o5 + except InvalidPartitionException as o6: + msg_type = TMessageType.REPLY + result.o6 = o6 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("markPartitionForEvent", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_isPartitionMarkedForEvent(self, seqid, iprot, oprot): + args = isPartitionMarkedForEvent_args() + args.read(iprot) + iprot.readMessageEnd() + result = isPartitionMarkedForEvent_result() + try: + result.success = self._handler.isPartitionMarkedForEvent(args.db_name, args.tbl_name, args.part_vals, args.eventType) + msg_type = TMessageType.REPLY + except 
TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except UnknownDBException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except UnknownTableException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except UnknownPartitionException as o5: + msg_type = TMessageType.REPLY + result.o5 = o5 + except InvalidPartitionException as o6: + msg_type = TMessageType.REPLY + result.o6 = o6 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("isPartitionMarkedForEvent", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_primary_keys(self, seqid, iprot, oprot): + args = get_primary_keys_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_primary_keys_result() + try: + result.success = self._handler.get_primary_keys(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_primary_keys", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_foreign_keys(self, seqid, iprot, oprot): + args = get_foreign_keys_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_foreign_keys_result() + try: + result.success = self._handler.get_foreign_keys(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_foreign_keys", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_unique_constraints(self, seqid, iprot, oprot): + args = get_unique_constraints_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_unique_constraints_result() + try: + result.success = self._handler.get_unique_constraints(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + 
logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_unique_constraints", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_not_null_constraints(self, seqid, iprot, oprot): + args = get_not_null_constraints_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_not_null_constraints_result() + try: + result.success = self._handler.get_not_null_constraints(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_not_null_constraints", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_default_constraints(self, seqid, iprot, oprot): + args = get_default_constraints_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_default_constraints_result() + try: + result.success = self._handler.get_default_constraints(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_default_constraints", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_check_constraints(self, seqid, iprot, oprot): + args = get_check_constraints_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_check_constraints_result() + try: + result.success = self._handler.get_check_constraints(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_check_constraints", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_all_table_constraints(self, seqid, iprot, oprot): + args = 
get_all_table_constraints_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_all_table_constraints_result() + try: + result.success = self._handler.get_all_table_constraints(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_all_table_constraints", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_update_table_column_statistics(self, seqid, iprot, oprot): + args = update_table_column_statistics_args() + args.read(iprot) + iprot.readMessageEnd() + result = update_table_column_statistics_result() + try: + result.success = self._handler.update_table_column_statistics(args.stats_obj) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except InvalidInputException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("update_table_column_statistics", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_update_partition_column_statistics(self, seqid, iprot, oprot): + args = update_partition_column_statistics_args() + args.read(iprot) + iprot.readMessageEnd() + result = update_partition_column_statistics_result() + try: + result.success = self._handler.update_partition_column_statistics(args.stats_obj) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except InvalidInputException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("update_partition_column_statistics", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_update_table_column_statistics_req(self, seqid, iprot, oprot): + args = update_table_column_statistics_req_args() + args.read(iprot) + 
iprot.readMessageEnd() + result = update_table_column_statistics_req_result() + try: + result.success = self._handler.update_table_column_statistics_req(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except InvalidInputException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("update_table_column_statistics_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_update_partition_column_statistics_req(self, seqid, iprot, oprot): + args = update_partition_column_statistics_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = update_partition_column_statistics_req_result() + try: + result.success = self._handler.update_partition_column_statistics_req(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except InvalidInputException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("update_partition_column_statistics_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_update_transaction_statistics(self, seqid, iprot, oprot): + args = update_transaction_statistics_args() + args.read(iprot) + iprot.readMessageEnd() + result = update_transaction_statistics_result() + try: + self._handler.update_transaction_statistics(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("update_transaction_statistics", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_table_column_statistics(self, seqid, iprot, oprot): + args = get_table_column_statistics_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_table_column_statistics_result() + try: + result.success = self._handler.get_table_column_statistics(args.db_name, 
args.tbl_name, args.col_name) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except InvalidInputException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except InvalidObjectException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_table_column_statistics", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partition_column_statistics(self, seqid, iprot, oprot): + args = get_partition_column_statistics_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partition_column_statistics_result() + try: + result.success = self._handler.get_partition_column_statistics( + args.db_name, args.tbl_name, args.part_name, args.col_name + ) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except InvalidInputException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except InvalidObjectException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partition_column_statistics", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_table_statistics_req(self, seqid, iprot, oprot): + args = get_table_statistics_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_table_statistics_req_result() + try: + result.success = self._handler.get_table_statistics_req(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_table_statistics_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partitions_statistics_req(self, seqid, iprot, oprot): + args = get_partitions_statistics_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partitions_statistics_req_result() + try: + result.success = self._handler.get_partitions_statistics_req(args.request) + msg_type = TMessageType.REPLY 
+ except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partitions_statistics_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_aggr_stats_for(self, seqid, iprot, oprot): + args = get_aggr_stats_for_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_aggr_stats_for_result() + try: + result.success = self._handler.get_aggr_stats_for(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_aggr_stats_for", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_set_aggr_stats_for(self, seqid, iprot, oprot): + args = set_aggr_stats_for_args() + args.read(iprot) + iprot.readMessageEnd() + result = set_aggr_stats_for_result() + try: + result.success = self._handler.set_aggr_stats_for(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except InvalidInputException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("set_aggr_stats_for", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_delete_partition_column_statistics(self, seqid, iprot, oprot): + args = delete_partition_column_statistics_args() + args.read(iprot) + iprot.readMessageEnd() + result = delete_partition_column_statistics_result() + try: + result.success = self._handler.delete_partition_column_statistics( + args.db_name, args.tbl_name, args.part_name, args.col_name, args.engine + ) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except InvalidObjectException as o3: + msg_type = TMessageType.REPLY + 
result.o3 = o3 + except InvalidInputException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("delete_partition_column_statistics", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_delete_table_column_statistics(self, seqid, iprot, oprot): + args = delete_table_column_statistics_args() + args.read(iprot) + iprot.readMessageEnd() + result = delete_table_column_statistics_result() + try: + result.success = self._handler.delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name, args.engine) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except InvalidObjectException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except InvalidInputException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("delete_table_column_statistics", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_function(self, seqid, iprot, oprot): + args = create_function_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_function_result() + try: + self._handler.create_function(args.func) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except NoSuchObjectException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("create_function", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_function(self, seqid, iprot, oprot): + args = drop_function_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_function_result() + try: + self._handler.drop_function(args.dbName, args.funcName) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + 
msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_function", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_function(self, seqid, iprot, oprot): + args = alter_function_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_function_result() + try: + self._handler.alter_function(args.dbName, args.funcName, args.newFunc) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except InvalidOperationException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_function", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_functions(self, seqid, iprot, oprot): + args = get_functions_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_functions_result() + try: + result.success = self._handler.get_functions(args.dbName, args.pattern) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_functions", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_function(self, seqid, iprot, oprot): + args = get_function_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_function_result() + try: + result.success = self._handler.get_function(args.dbName, args.funcName) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_function", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_all_functions(self, seqid, iprot, oprot): + args = get_all_functions_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_all_functions_result() + try: + result.success = self._handler.get_all_functions() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + 
msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_all_functions", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_role(self, seqid, iprot, oprot): + args = create_role_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_role_result() + try: + result.success = self._handler.create_role(args.role) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("create_role", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_role(self, seqid, iprot, oprot): + args = drop_role_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_role_result() + try: + result.success = self._handler.drop_role(args.role_name) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_role", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_role_names(self, seqid, iprot, oprot): + args = get_role_names_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_role_names_result() + try: + result.success = self._handler.get_role_names() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_role_names", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_grant_role(self, seqid, iprot, oprot): + args = grant_role_args() + args.read(iprot) + iprot.readMessageEnd() + result = grant_role_result() + try: + result.success = self._handler.grant_role( + args.role_name, args.principal_name, args.principal_type, args.grantor, args.grantorType, args.grant_option + ) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = 
TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("grant_role", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_revoke_role(self, seqid, iprot, oprot): + args = revoke_role_args() + args.read(iprot) + iprot.readMessageEnd() + result = revoke_role_result() + try: + result.success = self._handler.revoke_role(args.role_name, args.principal_name, args.principal_type) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("revoke_role", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_list_roles(self, seqid, iprot, oprot): + args = list_roles_args() + args.read(iprot) + iprot.readMessageEnd() + result = list_roles_result() + try: + result.success = self._handler.list_roles(args.principal_name, args.principal_type) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("list_roles", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_grant_revoke_role(self, seqid, iprot, oprot): + args = grant_revoke_role_args() + args.read(iprot) + iprot.readMessageEnd() + result = grant_revoke_role_result() + try: + result.success = self._handler.grant_revoke_role(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("grant_revoke_role", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_principals_in_role(self, seqid, iprot, oprot): + args = get_principals_in_role_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_principals_in_role_result() + try: + result.success = self._handler.get_principals_in_role(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except 
MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_principals_in_role", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_role_grants_for_principal(self, seqid, iprot, oprot): + args = get_role_grants_for_principal_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_role_grants_for_principal_result() + try: + result.success = self._handler.get_role_grants_for_principal(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_role_grants_for_principal", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_privilege_set(self, seqid, iprot, oprot): + args = get_privilege_set_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_privilege_set_result() + try: + result.success = self._handler.get_privilege_set(args.hiveObject, args.user_name, args.group_names) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_privilege_set", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_list_privileges(self, seqid, iprot, oprot): + args = list_privileges_args() + args.read(iprot) + iprot.readMessageEnd() + result = list_privileges_result() + try: + result.success = self._handler.list_privileges(args.principal_name, args.principal_type, args.hiveObject) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("list_privileges", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_grant_privileges(self, seqid, iprot, oprot): + args = grant_privileges_args() + args.read(iprot) + iprot.readMessageEnd() + result = grant_privileges_result() + try: + result.success = 
self._handler.grant_privileges(args.privileges) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("grant_privileges", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_revoke_privileges(self, seqid, iprot, oprot): + args = revoke_privileges_args() + args.read(iprot) + iprot.readMessageEnd() + result = revoke_privileges_result() + try: + result.success = self._handler.revoke_privileges(args.privileges) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("revoke_privileges", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_grant_revoke_privileges(self, seqid, iprot, oprot): + args = grant_revoke_privileges_args() + args.read(iprot) + iprot.readMessageEnd() + result = grant_revoke_privileges_result() + try: + result.success = self._handler.grant_revoke_privileges(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("grant_revoke_privileges", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_refresh_privileges(self, seqid, iprot, oprot): + args = refresh_privileges_args() + args.read(iprot) + iprot.readMessageEnd() + result = refresh_privileges_result() + try: + result.success = self._handler.refresh_privileges(args.objToRefresh, args.authorizer, args.grantRequest) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("refresh_privileges", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_set_ugi(self, seqid, iprot, oprot): + args = set_ugi_args() + args.read(iprot) + iprot.readMessageEnd() + 
result = set_ugi_result() + try: + result.success = self._handler.set_ugi(args.user_name, args.group_names) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("set_ugi", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_delegation_token(self, seqid, iprot, oprot): + args = get_delegation_token_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_delegation_token_result() + try: + result.success = self._handler.get_delegation_token(args.token_owner, args.renewer_kerberos_principal_name) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_delegation_token", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_renew_delegation_token(self, seqid, iprot, oprot): + args = renew_delegation_token_args() + args.read(iprot) + iprot.readMessageEnd() + result = renew_delegation_token_result() + try: + result.success = self._handler.renew_delegation_token(args.token_str_form) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("renew_delegation_token", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_cancel_delegation_token(self, seqid, iprot, oprot): + args = cancel_delegation_token_args() + args.read(iprot) + iprot.readMessageEnd() + result = cancel_delegation_token_result() + try: + self._handler.cancel_delegation_token(args.token_str_form) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("cancel_delegation_token", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_add_token(self, seqid, 
iprot, oprot): + args = add_token_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_token_result() + try: + result.success = self._handler.add_token(args.token_identifier, args.delegation_token) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("add_token", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_remove_token(self, seqid, iprot, oprot): + args = remove_token_args() + args.read(iprot) + iprot.readMessageEnd() + result = remove_token_result() + try: + result.success = self._handler.remove_token(args.token_identifier) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("remove_token", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_token(self, seqid, iprot, oprot): + args = get_token_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_token_result() + try: + result.success = self._handler.get_token(args.token_identifier) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_token", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_all_token_identifiers(self, seqid, iprot, oprot): + args = get_all_token_identifiers_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_all_token_identifiers_result() + try: + result.success = self._handler.get_all_token_identifiers() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_all_token_identifiers", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_add_master_key(self, seqid, iprot, oprot): + args = add_master_key_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_master_key_result() + try: + result.success = self._handler.add_master_key(args.key) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + 
result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("add_master_key", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_update_master_key(self, seqid, iprot, oprot): + args = update_master_key_args() + args.read(iprot) + iprot.readMessageEnd() + result = update_master_key_result() + try: + self._handler.update_master_key(args.seq_number, args.key) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("update_master_key", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_remove_master_key(self, seqid, iprot, oprot): + args = remove_master_key_args() + args.read(iprot) + iprot.readMessageEnd() + result = remove_master_key_result() + try: + result.success = self._handler.remove_master_key(args.key_seq) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("remove_master_key", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_master_keys(self, seqid, iprot, oprot): + args = get_master_keys_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_master_keys_result() + try: + result.success = self._handler.get_master_keys() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_master_keys", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_open_txns(self, seqid, iprot, oprot): + args = get_open_txns_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_open_txns_result() + try: + result.success = self._handler.get_open_txns() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + 
logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_open_txns", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_open_txns_info(self, seqid, iprot, oprot): + args = get_open_txns_info_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_open_txns_info_result() + try: + result.success = self._handler.get_open_txns_info() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_open_txns_info", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_open_txns(self, seqid, iprot, oprot): + args = open_txns_args() + args.read(iprot) + iprot.readMessageEnd() + result = open_txns_result() + try: + result.success = self._handler.open_txns(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("open_txns", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_abort_txn(self, seqid, iprot, oprot): + args = abort_txn_args() + args.read(iprot) + iprot.readMessageEnd() + result = abort_txn_result() + try: + self._handler.abort_txn(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchTxnException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("abort_txn", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_abort_txns(self, seqid, iprot, oprot): + args = abort_txns_args() + args.read(iprot) + iprot.readMessageEnd() + result = abort_txns_result() + try: + self._handler.abort_txns(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchTxnException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("abort_txns", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + 
def process_commit_txn(self, seqid, iprot, oprot): + args = commit_txn_args() + args.read(iprot) + iprot.readMessageEnd() + result = commit_txn_result() + try: + self._handler.commit_txn(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchTxnException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TxnAbortedException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("commit_txn", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_latest_txnid_in_conflict(self, seqid, iprot, oprot): + args = get_latest_txnid_in_conflict_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_latest_txnid_in_conflict_result() + try: + result.success = self._handler.get_latest_txnid_in_conflict(args.txnId) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_latest_txnid_in_conflict", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_repl_tbl_writeid_state(self, seqid, iprot, oprot): + args = repl_tbl_writeid_state_args() + args.read(iprot) + iprot.readMessageEnd() + result = repl_tbl_writeid_state_result() + try: + self._handler.repl_tbl_writeid_state(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("repl_tbl_writeid_state", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_valid_write_ids(self, seqid, iprot, oprot): + args = get_valid_write_ids_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_valid_write_ids_result() + try: + result.success = self._handler.get_valid_write_ids(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchTxnException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + 
oprot.writeMessageBegin("get_valid_write_ids", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_allocate_table_write_ids(self, seqid, iprot, oprot): + args = allocate_table_write_ids_args() + args.read(iprot) + iprot.readMessageEnd() + result = allocate_table_write_ids_result() + try: + result.success = self._handler.allocate_table_write_ids(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchTxnException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TxnAbortedException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("allocate_table_write_ids", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_max_allocated_table_write_id(self, seqid, iprot, oprot): + args = get_max_allocated_table_write_id_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_max_allocated_table_write_id_result() + try: + result.success = self._handler.get_max_allocated_table_write_id(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_max_allocated_table_write_id", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_seed_write_id(self, seqid, iprot, oprot): + args = seed_write_id_args() + args.read(iprot) + iprot.readMessageEnd() + result = seed_write_id_result() + try: + self._handler.seed_write_id(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("seed_write_id", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_seed_txn_id(self, seqid, iprot, oprot): + args = seed_txn_id_args() + args.read(iprot) + iprot.readMessageEnd() + result = seed_txn_id_result() + try: + self._handler.seed_txn_id(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except 
Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("seed_txn_id", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_lock(self, seqid, iprot, oprot): + args = lock_args() + args.read(iprot) + iprot.readMessageEnd() + result = lock_result() + try: + result.success = self._handler.lock(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchTxnException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TxnAbortedException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("lock", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_check_lock(self, seqid, iprot, oprot): + args = check_lock_args() + args.read(iprot) + iprot.readMessageEnd() + result = check_lock_result() + try: + result.success = self._handler.check_lock(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchTxnException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TxnAbortedException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except NoSuchLockException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("check_lock", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_unlock(self, seqid, iprot, oprot): + args = unlock_args() + args.read(iprot) + iprot.readMessageEnd() + result = unlock_result() + try: + self._handler.unlock(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchLockException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TxnOpenException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("unlock", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_show_locks(self, seqid, iprot, oprot): + args = show_locks_args() + args.read(iprot) + iprot.readMessageEnd() + result = show_locks_result() + try: + result.success = self._handler.show_locks(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + 
msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("show_locks", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_heartbeat(self, seqid, iprot, oprot): + args = heartbeat_args() + args.read(iprot) + iprot.readMessageEnd() + result = heartbeat_result() + try: + self._handler.heartbeat(args.ids) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchLockException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchTxnException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TxnAbortedException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("heartbeat", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_heartbeat_txn_range(self, seqid, iprot, oprot): + args = heartbeat_txn_range_args() + args.read(iprot) + iprot.readMessageEnd() + result = heartbeat_txn_range_result() + try: + result.success = self._handler.heartbeat_txn_range(args.txns) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("heartbeat_txn_range", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_compact(self, seqid, iprot, oprot): + args = compact_args() + args.read(iprot) + iprot.readMessageEnd() + result = compact_result() + try: + self._handler.compact(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("compact", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_compact2(self, seqid, iprot, oprot): + args = compact2_args() + args.read(iprot) + iprot.readMessageEnd() + result = compact2_result() + try: + result.success = self._handler.compact2(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 
"Internal error") + oprot.writeMessageBegin("compact2", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_show_compact(self, seqid, iprot, oprot): + args = show_compact_args() + args.read(iprot) + iprot.readMessageEnd() + result = show_compact_result() + try: + result.success = self._handler.show_compact(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("show_compact", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_add_dynamic_partitions(self, seqid, iprot, oprot): + args = add_dynamic_partitions_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_dynamic_partitions_result() + try: + self._handler.add_dynamic_partitions(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchTxnException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TxnAbortedException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("add_dynamic_partitions", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_find_next_compact(self, seqid, iprot, oprot): + args = find_next_compact_args() + args.read(iprot) + iprot.readMessageEnd() + result = find_next_compact_result() + try: + result.success = self._handler.find_next_compact(args.workerId) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("find_next_compact", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_find_next_compact2(self, seqid, iprot, oprot): + args = find_next_compact2_args() + args.read(iprot) + iprot.readMessageEnd() + result = find_next_compact2_result() + try: + result.success = self._handler.find_next_compact2(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") 
+ oprot.writeMessageBegin("find_next_compact2", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_update_compactor_state(self, seqid, iprot, oprot): + args = update_compactor_state_args() + args.read(iprot) + iprot.readMessageEnd() + result = update_compactor_state_result() + try: + self._handler.update_compactor_state(args.cr, args.txn_id) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("update_compactor_state", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_find_columns_with_stats(self, seqid, iprot, oprot): + args = find_columns_with_stats_args() + args.read(iprot) + iprot.readMessageEnd() + result = find_columns_with_stats_result() + try: + result.success = self._handler.find_columns_with_stats(args.cr) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("find_columns_with_stats", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_mark_cleaned(self, seqid, iprot, oprot): + args = mark_cleaned_args() + args.read(iprot) + iprot.readMessageEnd() + result = mark_cleaned_result() + try: + self._handler.mark_cleaned(args.cr) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("mark_cleaned", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_mark_compacted(self, seqid, iprot, oprot): + args = mark_compacted_args() + args.read(iprot) + iprot.readMessageEnd() + result = mark_compacted_result() + try: + self._handler.mark_compacted(args.cr) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("mark_compacted", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_mark_failed(self, seqid, iprot, oprot): + args = 
mark_failed_args() + args.read(iprot) + iprot.readMessageEnd() + result = mark_failed_result() + try: + self._handler.mark_failed(args.cr) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("mark_failed", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_mark_refused(self, seqid, iprot, oprot): + args = mark_refused_args() + args.read(iprot) + iprot.readMessageEnd() + result = mark_refused_result() + try: + self._handler.mark_refused(args.cr) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("mark_refused", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_update_compaction_metrics_data(self, seqid, iprot, oprot): + args = update_compaction_metrics_data_args() + args.read(iprot) + iprot.readMessageEnd() + result = update_compaction_metrics_data_result() + try: + result.success = self._handler.update_compaction_metrics_data(args.data) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("update_compaction_metrics_data", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_remove_compaction_metrics_data(self, seqid, iprot, oprot): + args = remove_compaction_metrics_data_args() + args.read(iprot) + iprot.readMessageEnd() + result = remove_compaction_metrics_data_result() + try: + self._handler.remove_compaction_metrics_data(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("remove_compaction_metrics_data", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_set_hadoop_jobid(self, seqid, iprot, 
oprot): + args = set_hadoop_jobid_args() + args.read(iprot) + iprot.readMessageEnd() + result = set_hadoop_jobid_result() + try: + self._handler.set_hadoop_jobid(args.jobId, args.cq_id) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("set_hadoop_jobid", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_latest_committed_compaction_info(self, seqid, iprot, oprot): + args = get_latest_committed_compaction_info_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_latest_committed_compaction_info_result() + try: + result.success = self._handler.get_latest_committed_compaction_info(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_latest_committed_compaction_info", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_next_notification(self, seqid, iprot, oprot): + args = get_next_notification_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_next_notification_result() + try: + result.success = self._handler.get_next_notification(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_next_notification", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_current_notificationEventId(self, seqid, iprot, oprot): + args = get_current_notificationEventId_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_current_notificationEventId_result() + try: + result.success = self._handler.get_current_notificationEventId() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_current_notificationEventId", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_notification_events_count(self, seqid, iprot, oprot): + args = get_notification_events_count_args() + args.read(iprot) + iprot.readMessageEnd() + result = 
get_notification_events_count_result() + try: + result.success = self._handler.get_notification_events_count(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_notification_events_count", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_fire_listener_event(self, seqid, iprot, oprot): + args = fire_listener_event_args() + args.read(iprot) + iprot.readMessageEnd() + result = fire_listener_event_result() + try: + result.success = self._handler.fire_listener_event(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("fire_listener_event", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_flushCache(self, seqid, iprot, oprot): + args = flushCache_args() + args.read(iprot) + iprot.readMessageEnd() + result = flushCache_result() + try: + self._handler.flushCache() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("flushCache", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_add_write_notification_log(self, seqid, iprot, oprot): + args = add_write_notification_log_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_write_notification_log_result() + try: + result.success = self._handler.add_write_notification_log(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("add_write_notification_log", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_add_write_notification_log_in_batch(self, seqid, iprot, oprot): + args = add_write_notification_log_in_batch_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_write_notification_log_in_batch_result() + try: + result.success = self._handler.add_write_notification_log_in_batch(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + 
logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("add_write_notification_log_in_batch", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_cm_recycle(self, seqid, iprot, oprot): + args = cm_recycle_args() + args.read(iprot) + iprot.readMessageEnd() + result = cm_recycle_result() + try: + result.success = self._handler.cm_recycle(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("cm_recycle", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_file_metadata_by_expr(self, seqid, iprot, oprot): + args = get_file_metadata_by_expr_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_file_metadata_by_expr_result() + try: + result.success = self._handler.get_file_metadata_by_expr(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_file_metadata_by_expr", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_file_metadata(self, seqid, iprot, oprot): + args = get_file_metadata_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_file_metadata_result() + try: + result.success = self._handler.get_file_metadata(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_file_metadata", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_put_file_metadata(self, seqid, iprot, oprot): + args = put_file_metadata_args() + args.read(iprot) + iprot.readMessageEnd() + result = put_file_metadata_result() + try: + result.success = self._handler.put_file_metadata(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result 
= TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("put_file_metadata", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_clear_file_metadata(self, seqid, iprot, oprot): + args = clear_file_metadata_args() + args.read(iprot) + iprot.readMessageEnd() + result = clear_file_metadata_result() + try: + result.success = self._handler.clear_file_metadata(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("clear_file_metadata", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_cache_file_metadata(self, seqid, iprot, oprot): + args = cache_file_metadata_args() + args.read(iprot) + iprot.readMessageEnd() + result = cache_file_metadata_result() + try: + result.success = self._handler.cache_file_metadata(args.req) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("cache_file_metadata", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_metastore_db_uuid(self, seqid, iprot, oprot): + args = get_metastore_db_uuid_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_metastore_db_uuid_result() + try: + result.success = self._handler.get_metastore_db_uuid() + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_metastore_db_uuid", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_resource_plan(self, seqid, iprot, oprot): + args = create_resource_plan_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_resource_plan_result() + try: + result.success = self._handler.create_resource_plan(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in 
handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("create_resource_plan", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_resource_plan(self, seqid, iprot, oprot): + args = get_resource_plan_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_resource_plan_result() + try: + result.success = self._handler.get_resource_plan(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_resource_plan", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_active_resource_plan(self, seqid, iprot, oprot): + args = get_active_resource_plan_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_active_resource_plan_result() + try: + result.success = self._handler.get_active_resource_plan(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_active_resource_plan", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_all_resource_plans(self, seqid, iprot, oprot): + args = get_all_resource_plans_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_all_resource_plans_result() + try: + result.success = self._handler.get_all_resource_plans(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_all_resource_plans", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_resource_plan(self, seqid, iprot, oprot): + args = alter_resource_plan_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_resource_plan_result() + try: + result.success = self._handler.alter_resource_plan(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: 
+ msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_resource_plan", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_validate_resource_plan(self, seqid, iprot, oprot): + args = validate_resource_plan_args() + args.read(iprot) + iprot.readMessageEnd() + result = validate_resource_plan_result() + try: + result.success = self._handler.validate_resource_plan(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("validate_resource_plan", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_resource_plan(self, seqid, iprot, oprot): + args = drop_resource_plan_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_resource_plan_result() + try: + result.success = self._handler.drop_resource_plan(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_resource_plan", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_wm_trigger(self, seqid, iprot, oprot): + args = create_wm_trigger_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_wm_trigger_result() + try: + result.success = self._handler.create_wm_trigger(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except InvalidObjectException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except MetaException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected 
exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("create_wm_trigger", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_wm_trigger(self, seqid, iprot, oprot): + args = alter_wm_trigger_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_wm_trigger_result() + try: + result.success = self._handler.alter_wm_trigger(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_wm_trigger", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_wm_trigger(self, seqid, iprot, oprot): + args = drop_wm_trigger_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_wm_trigger_result() + try: + result.success = self._handler.drop_wm_trigger(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_wm_trigger", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_triggers_for_resourceplan(self, seqid, iprot, oprot): + args = get_triggers_for_resourceplan_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_triggers_for_resourceplan_result() + try: + result.success = self._handler.get_triggers_for_resourceplan(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_triggers_for_resourceplan", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_wm_pool(self, seqid, iprot, oprot): + args = create_wm_pool_args() + args.read(iprot) + 
iprot.readMessageEnd() + result = create_wm_pool_result() + try: + result.success = self._handler.create_wm_pool(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except InvalidObjectException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except MetaException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("create_wm_pool", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_wm_pool(self, seqid, iprot, oprot): + args = alter_wm_pool_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_wm_pool_result() + try: + result.success = self._handler.alter_wm_pool(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except InvalidObjectException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except MetaException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_wm_pool", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_wm_pool(self, seqid, iprot, oprot): + args = drop_wm_pool_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_wm_pool_result() + try: + result.success = self._handler.drop_wm_pool(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_wm_pool", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_or_update_wm_mapping(self, seqid, iprot, oprot): + args = create_or_update_wm_mapping_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_or_update_wm_mapping_result() + try: + result.success = self._handler.create_or_update_wm_mapping(args.request) + msg_type = TMessageType.REPLY + except 
TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except InvalidObjectException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except MetaException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("create_or_update_wm_mapping", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_wm_mapping(self, seqid, iprot, oprot): + args = drop_wm_mapping_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_wm_mapping_result() + try: + result.success = self._handler.drop_wm_mapping(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_wm_mapping", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_or_drop_wm_trigger_to_pool_mapping(self, seqid, iprot, oprot): + args = create_or_drop_wm_trigger_to_pool_mapping_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_or_drop_wm_trigger_to_pool_mapping_result() + try: + result.success = self._handler.create_or_drop_wm_trigger_to_pool_mapping(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except InvalidObjectException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except MetaException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("create_or_drop_wm_trigger_to_pool_mapping", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_ischema(self, seqid, iprot, oprot): + args = create_ischema_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_ischema_result() + try: + self._handler.create_ischema(args.schema) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = 
TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("create_ischema", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_ischema(self, seqid, iprot, oprot): + args = alter_ischema_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_ischema_result() + try: + self._handler.alter_ischema(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("alter_ischema", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_ischema(self, seqid, iprot, oprot): + args = get_ischema_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_ischema_result() + try: + result.success = self._handler.get_ischema(args.name) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_ischema", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_ischema(self, seqid, iprot, oprot): + args = drop_ischema_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_ischema_result() + try: + self._handler.drop_ischema(args.name) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_ischema", msg_type, seqid) + result.write(oprot) + 
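Every `process_*` method in this generated service follows one template: decode the call's args struct, invoke the matching handler method, turn exceptions declared in the service IDL (MetaException, NoSuchObjectException, and so on) into a normal REPLY carried in a named result field, and turn anything unexpected into a TApplicationException EXCEPTION reply; TTransportException is re-raised so the server loop can drop the dead connection. The generator deliberately unrolls this per method. A hand-written distillation of the same template (a hypothetical helper, shown only to make the pattern explicit, not part of the patch) might look like:

    import logging
    from thrift.Thrift import TApplicationException, TMessageType
    from thrift.transport import TTransport

    def make_process(fname, args_cls, result_cls, invoke, declared=()):
        """Build one process_<fname> method.

        declared -- (exception_class, result_field) pairs, e.g.
        [(MetaException, "o1")]; this per-method mapping is the part that
        genuinely differs between the generated methods.
        """
        def process(self, seqid, iprot, oprot):
            args = args_cls()
            args.read(iprot)                    # decode the call's argument struct
            iprot.readMessageEnd()
            result = result_cls()
            msg_type = TMessageType.REPLY
            try:
                value = invoke(self._handler, args)
                if hasattr(result, "success"):  # void methods have no success field
                    result.success = value
            except TTransport.TTransportException:
                raise                           # dead transport: no reply can be written
            except TApplicationException as ex:
                logging.exception("TApplication exception in handler")
                msg_type = TMessageType.EXCEPTION
                result = ex
            except Exception as ex:
                for exc_cls, field in declared:
                    if isinstance(ex, exc_cls):
                        setattr(result, field, ex)  # declared IDL exception: still a REPLY
                        break
                else:
                    logging.exception("Unexpected exception in handler")
                    msg_type = TMessageType.EXCEPTION
                    result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error")
            oprot.writeMessageBegin(fname, msg_type, seqid)
            result.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
        return process

The only pieces that vary between the generated methods are the method name, the two struct classes, and the per-method mapping of declared exception types to result fields (o1, o2, ...), which is what such a factory would parameterize, e.g. `process_flushCache = make_process("flushCache", flushCache_args, flushCache_result, lambda h, a: h.flushCache())`.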
oprot.writeMessageEnd() + oprot.trans.flush() + + def process_add_schema_version(self, seqid, iprot, oprot): + args = add_schema_version_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_schema_version_result() + try: + self._handler.add_schema_version(args.schemaVersion) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("add_schema_version", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_schema_version(self, seqid, iprot, oprot): + args = get_schema_version_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_schema_version_result() + try: + result.success = self._handler.get_schema_version(args.schemaVersion) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_schema_version", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_schema_latest_version(self, seqid, iprot, oprot): + args = get_schema_latest_version_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_schema_latest_version_result() + try: + result.success = self._handler.get_schema_latest_version(args.schemaName) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_schema_latest_version", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_schema_all_versions(self, seqid, iprot, oprot): + args = get_schema_all_versions_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_schema_all_versions_result() + try: + result.success = self._handler.get_schema_all_versions(args.schemaName) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 
+ except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_schema_all_versions", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_schema_version(self, seqid, iprot, oprot): + args = drop_schema_version_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_schema_version_result() + try: + self._handler.drop_schema_version(args.schemaVersion) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_schema_version", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_schemas_by_cols(self, seqid, iprot, oprot): + args = get_schemas_by_cols_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_schemas_by_cols_result() + try: + result.success = self._handler.get_schemas_by_cols(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_schemas_by_cols", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_map_schema_version_to_serde(self, seqid, iprot, oprot): + args = map_schema_version_to_serde_args() + args.read(iprot) + iprot.readMessageEnd() + result = map_schema_version_to_serde_result() + try: + self._handler.map_schema_version_to_serde(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("map_schema_version_to_serde", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_set_schema_version_state(self, seqid, iprot, oprot): + args = set_schema_version_state_args() + 
args.read(iprot) + iprot.readMessageEnd() + result = set_schema_version_state_result() + try: + self._handler.set_schema_version_state(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("set_schema_version_state", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_add_serde(self, seqid, iprot, oprot): + args = add_serde_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_serde_result() + try: + self._handler.add_serde(args.serde) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("add_serde", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_serde(self, seqid, iprot, oprot): + args = get_serde_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_serde_result() + try: + result.success = self._handler.get_serde(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_serde", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_lock_materialization_rebuild(self, seqid, iprot, oprot): + args = get_lock_materialization_rebuild_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_lock_materialization_rebuild_result() + try: + result.success = self._handler.get_lock_materialization_rebuild(args.dbName, args.tableName, args.txnId) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = 
TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_lock_materialization_rebuild", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_heartbeat_lock_materialization_rebuild(self, seqid, iprot, oprot): + args = heartbeat_lock_materialization_rebuild_args() + args.read(iprot) + iprot.readMessageEnd() + result = heartbeat_lock_materialization_rebuild_result() + try: + result.success = self._handler.heartbeat_lock_materialization_rebuild(args.dbName, args.tableName, args.txnId) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("heartbeat_lock_materialization_rebuild", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_add_runtime_stats(self, seqid, iprot, oprot): + args = add_runtime_stats_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_runtime_stats_result() + try: + self._handler.add_runtime_stats(args.stat) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("add_runtime_stats", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_runtime_stats(self, seqid, iprot, oprot): + args = get_runtime_stats_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_runtime_stats_result() + try: + result.success = self._handler.get_runtime_stats(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_runtime_stats", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_partitions_with_specs(self, seqid, iprot, oprot): + args = get_partitions_with_specs_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partitions_with_specs_result() + try: + result.success = self._handler.get_partitions_with_specs(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + 
logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_partitions_with_specs", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_scheduled_query_poll(self, seqid, iprot, oprot): + args = scheduled_query_poll_args() + args.read(iprot) + iprot.readMessageEnd() + result = scheduled_query_poll_result() + try: + result.success = self._handler.scheduled_query_poll(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("scheduled_query_poll", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_scheduled_query_maintenance(self, seqid, iprot, oprot): + args = scheduled_query_maintenance_args() + args.read(iprot) + iprot.readMessageEnd() + result = scheduled_query_maintenance_result() + try: + self._handler.scheduled_query_maintenance(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except AlreadyExistsException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except InvalidInputException as o4: + msg_type = TMessageType.REPLY + result.o4 = o4 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("scheduled_query_maintenance", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_scheduled_query_progress(self, seqid, iprot, oprot): + args = scheduled_query_progress_args() + args.read(iprot) + iprot.readMessageEnd() + result = scheduled_query_progress_result() + try: + self._handler.scheduled_query_progress(args.info) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("scheduled_query_progress", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_scheduled_query(self, seqid, iprot, oprot): + args = get_scheduled_query_args() + args.read(iprot) + iprot.readMessageEnd() + 
result = get_scheduled_query_result() + try: + result.success = self._handler.get_scheduled_query(args.scheduleKey) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_scheduled_query", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_add_replication_metrics(self, seqid, iprot, oprot): + args = add_replication_metrics_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_replication_metrics_result() + try: + self._handler.add_replication_metrics(args.replicationMetricList) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("add_replication_metrics", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_replication_metrics(self, seqid, iprot, oprot): + args = get_replication_metrics_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_replication_metrics_result() + try: + result.success = self._handler.get_replication_metrics(args.rqst) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_replication_metrics", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_open_txns_req(self, seqid, iprot, oprot): + args = get_open_txns_req_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_open_txns_req_result() + try: + result.success = self._handler.get_open_txns_req(args.getOpenTxnsRequest) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_open_txns_req", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_create_stored_procedure(self, seqid, 
iprot, oprot): + args = create_stored_procedure_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_stored_procedure_result() + try: + self._handler.create_stored_procedure(args.proc) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("create_stored_procedure", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_stored_procedure(self, seqid, iprot, oprot): + args = get_stored_procedure_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_stored_procedure_result() + try: + result.success = self._handler.get_stored_procedure(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_stored_procedure", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_stored_procedure(self, seqid, iprot, oprot): + args = drop_stored_procedure_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_stored_procedure_result() + try: + self._handler.drop_stored_procedure(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_stored_procedure", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_all_stored_procedures(self, seqid, iprot, oprot): + args = get_all_stored_procedures_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_all_stored_procedures_result() + try: + result.success = self._handler.get_all_stored_procedures(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = 
TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_all_stored_procedures", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_find_package(self, seqid, iprot, oprot): + args = find_package_args() + args.read(iprot) + iprot.readMessageEnd() + result = find_package_result() + try: + result.success = self._handler.find_package(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("find_package", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_add_package(self, seqid, iprot, oprot): + args = add_package_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_package_result() + try: + self._handler.add_package(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("add_package", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_all_packages(self, seqid, iprot, oprot): + args = get_all_packages_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_all_packages_result() + try: + result.success = self._handler.get_all_packages(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_all_packages", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_package(self, seqid, iprot, oprot): + args = drop_package_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_package_result() + try: + self._handler.drop_package(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = 
TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("drop_package", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_all_write_event_info(self, seqid, iprot, oprot): + args = get_all_write_event_info_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_all_write_event_info_result() + try: + result.success = self._handler.get_all_write_event_info(args.request) + msg_type = TMessageType.REPLY + except TTransport.TTransportException: + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except TApplicationException as ex: + logging.exception("TApplication exception in handler") + msg_type = TMessageType.EXCEPTION + result = ex + except Exception: + logging.exception("Unexpected exception in handler") + msg_type = TMessageType.EXCEPTION + result = TApplicationException(TApplicationException.INTERNAL_ERROR, "Internal error") + oprot.writeMessageBegin("get_all_write_event_info", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + +# HELPER FUNCTIONS AND STRUCTURES + + +class getMetaConf_args: + """ + Attributes: + - key + + """ + + def __init__( + self, + key=None, + ): + self.key = key + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.key = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getMetaConf_args") + if self.key is not None: + oprot.writeFieldBegin("key", TType.STRING, 1) + oprot.writeString(self.key.encode("utf-8") if sys.version_info[0] == 2 else self.key) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getMetaConf_args) +getMetaConf_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "key", + "UTF8", + None, + ), # 1 +) + + +class getMetaConf_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = ( + 
iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("getMetaConf_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRING, 0) + oprot.writeString(self.success.encode("utf-8") if sys.version_info[0] == 2 else self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(getMetaConf_result) +getMetaConf_result.thrift_spec = ( + ( + 0, + TType.STRING, + "success", + "UTF8", + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class setMetaConf_args: + """ + Attributes: + - key + - value + + """ + + def __init__( + self, + key=None, + value=None, + ): + self.key = key + self.value = value + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.key = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.value = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("setMetaConf_args") + if self.key is not None: + oprot.writeFieldBegin("key", TType.STRING, 1) + oprot.writeString(self.key.encode("utf-8") if sys.version_info[0] == 2 else self.key) + oprot.writeFieldEnd() + if self.value is not None: + oprot.writeFieldBegin("value", TType.STRING, 2) + oprot.writeString(self.value.encode("utf-8") if sys.version_info[0] == 2 else self.value) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == 
other) + + +all_structs.append(setMetaConf_args) +setMetaConf_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "key", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "value", + "UTF8", + None, + ), # 2 +) + + +class setMetaConf_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("setMetaConf_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(setMetaConf_result) +setMetaConf_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class create_catalog_args: + """ + Attributes: + - catalog + + """ + + def __init__( + self, + catalog=None, + ): + self.catalog = catalog + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.catalog = CreateCatalogRequest() + self.catalog.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_catalog_args") + if self.catalog is not None: + oprot.writeFieldBegin("catalog", TType.STRUCT, 1) + self.catalog.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_catalog_args) +create_catalog_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "catalog", + [CreateCatalogRequest, None], + None, + ), # 1 +) + + +class 
create_catalog_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_catalog_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_catalog_result) +create_catalog_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class alter_catalog_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = AlterCatalogRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_catalog_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value 
in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_catalog_args) +alter_catalog_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [AlterCatalogRequest, None], + None, + ), # 1 +) + + +class alter_catalog_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_catalog_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_catalog_result) +alter_catalog_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class get_catalog_args: + """ + Attributes: + - catName + + """ + + def __init__( + self, + catName=None, + ): + self.catName = catName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.catName = GetCatalogRequest() + self.catName.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not 
None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_catalog_args") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRUCT, 1) + self.catName.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_catalog_args) +get_catalog_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "catName", + [GetCatalogRequest, None], + None, + ), # 1 +) + + +class get_catalog_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetCatalogResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_catalog_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_catalog_result) +get_catalog_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetCatalogResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_catalogs_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + 
iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_catalogs_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_catalogs_args) +get_catalogs_args.thrift_spec = () + + +class get_catalogs_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetCatalogsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_catalogs_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_catalogs_result) +get_catalogs_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetCatalogsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class drop_catalog_args: + """ + Attributes: + - catName + + """ + + def __init__( + self, + catName=None, + ): + self.catName = catName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.catName = DropCatalogRequest() + 
self.catName.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_catalog_args") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRUCT, 1) + self.catName.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_catalog_args) +drop_catalog_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "catName", + [DropCatalogRequest, None], + None, + ), # 1 +) + + +class drop_catalog_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_catalog_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_catalog_result) +drop_catalog_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class create_database_args: + """ + Attributes: + - database + + """ + + def __init__( + self, + database=None, + ): + 
self.database = database + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.database = Database() + self.database.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_database_args") + if self.database is not None: + oprot.writeFieldBegin("database", TType.STRUCT, 1) + self.database.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_database_args) +create_database_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "database", + [Database, None], + None, + ), # 1 +) + + +class create_database_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_database_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == 
other) + + +all_structs.append(create_database_result) +create_database_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class get_database_args: + """ + Attributes: + - name + + """ + + def __init__( + self, + name=None, + ): + self.name = name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_database_args") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_database_args) +get_database_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 +) + + +class get_database_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Database() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_database_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + 
oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_database_result) +get_database_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Database, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_database_req_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GetDatabaseRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_database_req_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_database_req_args) +get_database_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [GetDatabaseRequest, None], + None, + ), # 1 +) + + +class get_database_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Database() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + 
else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_database_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_database_req_result) +get_database_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Database, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class drop_database_args: + """ + Attributes: + - name + - deleteData + - cascade + + """ + + def __init__( + self, + name=None, + deleteData=None, + cascade=None, + ): + self.name = name + self.deleteData = deleteData + self.cascade = cascade + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.cascade = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_database_args") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin("deleteData", TType.BOOL, 2) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + if self.cascade is not None: + oprot.writeFieldBegin("cascade", TType.BOOL, 3) + oprot.writeBool(self.cascade) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + 
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_database_args) +drop_database_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.BOOL, + "deleteData", + None, + None, + ), # 2 + ( + 3, + TType.BOOL, + "cascade", + None, + None, + ), # 3 +) + + +class drop_database_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_database_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_database_result) +drop_database_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class drop_database_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = DropDatabaseRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not 
None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_database_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_database_req_args) +drop_database_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [DropDatabaseRequest, None], + None, + ), # 1 +) + + +class drop_database_req_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_database_req_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_database_req_result) +drop_database_req_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class get_databases_args: + """ + Attributes: + - pattern + + """ + + def __init__( + self, + pattern=None, + ): + self.pattern = pattern + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + 
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.pattern = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_databases_args") + if self.pattern is not None: + oprot.writeFieldBegin("pattern", TType.STRING, 1) + oprot.writeString(self.pattern.encode("utf-8") if sys.version_info[0] == 2 else self.pattern) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_databases_args) +get_databases_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "pattern", + "UTF8", + None, + ), # 1 +) + + +class get_databases_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1252, _size1249) = iprot.readListBegin() + for _i1253 in range(_size1249): + _elem1254 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1254) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_databases_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1255 in self.success: + oprot.writeString(iter1255.encode("utf-8") if sys.version_info[0] == 2 else iter1255) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, 
self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_databases_result) +get_databases_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_all_databases_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_databases_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_databases_args) +get_all_databases_args.thrift_spec = () + + +class get_all_databases_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1259, _size1256) = iprot.readListBegin() + for _i1260 in range(_size1256): + _elem1261 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1261) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_databases_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1262 in self.success: + oprot.writeString(iter1262.encode("utf-8") if sys.version_info[0] == 2 else iter1262) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return 
"{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_databases_result) +get_all_databases_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class alter_database_args: + """ + Attributes: + - dbname + - db + + """ + + def __init__( + self, + dbname=None, + db=None, + ): + self.dbname = dbname + self.db = db + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.db = Database() + self.db.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_database_args") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.db is not None: + oprot.writeFieldBegin("db", TType.STRUCT, 2) + self.db.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_database_args) +alter_database_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRUCT, + "db", + [Database, None], + None, + ), # 2 +) + + +class alter_database_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + 
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_database_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_database_result) +alter_database_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class create_dataconnector_args: + """ + Attributes: + - connector + + """ + + def __init__( + self, + connector=None, + ): + self.connector = connector + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.connector = DataConnector() + self.connector.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_dataconnector_args") + if self.connector is not None: + oprot.writeFieldBegin("connector", TType.STRUCT, 1) + self.connector.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_dataconnector_args) +create_dataconnector_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "connector", + [DataConnector, None], + None, + ), # 1 +) + + +class create_dataconnector_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + 
else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_dataconnector_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_dataconnector_result) +create_dataconnector_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class get_dataconnector_req_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GetDataConnectorRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_dataconnector_req_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_dataconnector_req_args) +get_dataconnector_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [GetDataConnectorRequest, None], + None, + ), # 1 +) + + +class get_dataconnector_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, 
iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = DataConnector() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_dataconnector_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_dataconnector_req_result) +get_dataconnector_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [DataConnector, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class drop_dataconnector_args: + """ + Attributes: + - name + - ifNotExists + - checkReferences + + """ + + def __init__( + self, + name=None, + ifNotExists=None, + checkReferences=None, + ): + self.name = name + self.ifNotExists = ifNotExists + self.checkReferences = checkReferences + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == -1: + if ftype == TType.BOOL: + self.ifNotExists = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == -2: + if ftype == TType.BOOL: + self.checkReferences = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
oprot.writeStructBegin("drop_dataconnector_args") + if self.checkReferences is not None: + oprot.writeFieldBegin("checkReferences", TType.BOOL, -2) + oprot.writeBool(self.checkReferences) + oprot.writeFieldEnd() + if self.ifNotExists is not None: + oprot.writeFieldBegin("ifNotExists", TType.BOOL, -1) + oprot.writeBool(self.ifNotExists) + oprot.writeFieldEnd() + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_dataconnector_args) +drop_dataconnector_args.thrift_spec = () + + +class drop_dataconnector_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_dataconnector_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_dataconnector_result) +drop_dataconnector_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class get_dataconnectors_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, 
TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_dataconnectors_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_dataconnectors_args) +get_dataconnectors_args.thrift_spec = () + + +class get_dataconnectors_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1266, _size1263) = iprot.readListBegin() + for _i1267 in range(_size1263): + _elem1268 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1268) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_dataconnectors_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1269 in self.success: + oprot.writeString(iter1269.encode("utf-8") if sys.version_info[0] == 2 else iter1269) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_dataconnectors_result) +get_dataconnectors_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class 
alter_dataconnector_args: + """ + Attributes: + - name + - connector + + """ + + def __init__( + self, + name=None, + connector=None, + ): + self.name = name + self.connector = connector + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.connector = DataConnector() + self.connector.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_dataconnector_args") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.connector is not None: + oprot.writeFieldBegin("connector", TType.STRUCT, 2) + self.connector.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_dataconnector_args) +alter_dataconnector_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRUCT, + "connector", + [DataConnector, None], + None, + ), # 2 +) + + +class alter_dataconnector_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_dataconnector_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + 
oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_dataconnector_result) +alter_dataconnector_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_type_args: + """ + Attributes: + - name + + """ + + def __init__( + self, + name=None, + ): + self.name = name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_type_args") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_type_args) +get_type_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 +) + + +class get_type_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Type() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, 
[self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_type_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_type_result) +get_type_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Type, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class create_type_args: + """ + Attributes: + - type + + """ + + def __init__( + self, + type=None, + ): + self.type = type + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.type = Type() + self.type.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_type_args") + if self.type is not None: + oprot.writeFieldBegin("type", TType.STRUCT, 1) + self.type.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_type_args) +create_type_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "type", + [Type, None], + None, + ), # 1 +) + + +class create_type_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == 
TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_type_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_type_result) +create_type_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class drop_type_args: + """ + Attributes: + - type + + """ + + def __init__( + self, + type=None, + ): + self.type = type + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.type = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_type_args") + if self.type is not None: + oprot.writeFieldBegin("type", TType.STRING, 1) + oprot.writeString(self.type.encode("utf-8") if sys.version_info[0] == 2 else self.type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + 
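+# Note: every *_args/*_result class in this generated file follows the same
+# shape: `read`/`write` first try the optional accelerated codec
+# (`_fast_decode`/`_fast_encode`, driven by the class's `thrift_spec` tuple)
+# and otherwise fall back to field-by-field protocol calls; each class is then
+# registered in `all_structs` and assigned its `thrift_spec` after the body.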
+all_structs.append(drop_type_args) +drop_type_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "type", + "UTF8", + None, + ), # 1 +) + + +class drop_type_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_type_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_type_result) +drop_type_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_type_all_args: + """ + Attributes: + - name + + """ + + def __init__( + self, + name=None, + ): + self.name = name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_type_all_args") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + 
oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_type_all_args) +get_type_all_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 +) + + +class get_type_all_result: + """ + Attributes: + - success + - o2 + + """ + + def __init__( + self, + success=None, + o2=None, + ): + self.success = success + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.MAP: + self.success = {} + (_ktype1271, _vtype1272, _size1270) = iprot.readMapBegin() + for _i1274 in range(_size1270): + _key1275 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val1276 = Type() + _val1276.read(iprot) + self.success[_key1275] = _val1276 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_type_all_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.MAP, 0) + oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) + for kiter1277, viter1278 in self.success.items(): + oprot.writeString(kiter1277.encode("utf-8") if sys.version_info[0] == 2 else kiter1277) + viter1278.write(oprot) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 1) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_type_all_result) +get_type_all_result.thrift_spec = ( + ( + 0, + TType.MAP, + "success", + (TType.STRING, "UTF8", TType.STRUCT, [Type, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 1 +) + + +class get_fields_args: + """ + Attributes: + - db_name + - table_name + + """ + + def __init__( + self, + db_name=None, + table_name=None, + ): + self.db_name = db_name + self.table_name = table_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, 
TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_fields_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.table_name is not None: + oprot.writeFieldBegin("table_name", TType.STRING, 2) + oprot.writeString(self.table_name.encode("utf-8") if sys.version_info[0] == 2 else self.table_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_fields_args) +get_fields_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "table_name", + "UTF8", + None, + ), # 2 +) + + +class get_fields_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1282, _size1279) = iprot.readListBegin() + for _i1283 in range(_size1279): + _elem1284 = FieldSchema() + _elem1284.read(iprot) + self.success.append(_elem1284) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = UnknownTableException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
oprot.writeStructBegin("get_fields_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1285 in self.success: + iter1285.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_fields_result) +get_fields_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [UnknownTableException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [UnknownDBException, None], + None, + ), # 3 +) + + +class get_fields_with_environment_context_args: + """ + Attributes: + - db_name + - table_name + - environment_context + + """ + + def __init__( + self, + db_name=None, + table_name=None, + environment_context=None, + ): + self.db_name = db_name + self.table_name = table_name + self.environment_context = environment_context + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.environment_context = EnvironmentContext() + self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_fields_with_environment_context_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.table_name is not None: + oprot.writeFieldBegin("table_name", TType.STRING, 2) + oprot.writeString(self.table_name.encode("utf-8") if sys.version_info[0] == 2 else self.table_name) + oprot.writeFieldEnd() + if self.environment_context is not None: + 
oprot.writeFieldBegin("environment_context", TType.STRUCT, 3) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_fields_with_environment_context_args) +get_fields_with_environment_context_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "table_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRUCT, + "environment_context", + [EnvironmentContext, None], + None, + ), # 3 +) + + +class get_fields_with_environment_context_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1289, _size1286) = iprot.readListBegin() + for _i1290 in range(_size1286): + _elem1291 = FieldSchema() + _elem1291.read(iprot) + self.success.append(_elem1291) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = UnknownTableException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_fields_with_environment_context_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1292 in self.success: + iter1292.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + 
+all_structs.append(get_fields_with_environment_context_result) +get_fields_with_environment_context_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [UnknownTableException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [UnknownDBException, None], + None, + ), # 3 +) + + +class get_fields_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetFieldsRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_fields_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_fields_req_args) +get_fields_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [GetFieldsRequest, None], + None, + ), # 1 +) + + +class get_fields_req_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetFieldsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = UnknownTableException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_fields_req_result") + if self.success is not None: + 
oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_fields_req_result) +get_fields_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetFieldsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [UnknownTableException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [UnknownDBException, None], + None, + ), # 3 +) + + +class get_schema_args: + """ + Attributes: + - db_name + - table_name + + """ + + def __init__( + self, + db_name=None, + table_name=None, + ): + self.db_name = db_name + self.table_name = table_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schema_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.table_name is not None: + oprot.writeFieldBegin("table_name", TType.STRING, 2) + oprot.writeString(self.table_name.encode("utf-8") if sys.version_info[0] == 2 else self.table_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schema_args) +get_schema_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), 
# 1 + ( + 2, + TType.STRING, + "table_name", + "UTF8", + None, + ), # 2 +) + + +class get_schema_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1296, _size1293) = iprot.readListBegin() + for _i1297 in range(_size1293): + _elem1298 = FieldSchema() + _elem1298.read(iprot) + self.success.append(_elem1298) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = UnknownTableException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schema_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1299 in self.success: + iter1299.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schema_result) +get_schema_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [UnknownTableException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [UnknownDBException, None], + None, + ), # 3 +) + + +class get_schema_with_environment_context_args: + """ + Attributes: + - db_name + - table_name + - environment_context + + """ + + def __init__( + self, + db_name=None, + table_name=None, + environment_context=None, + ): + self.db_name = db_name + self.table_name = table_name + self.environment_context = environment_context + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + 
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.environment_context = EnvironmentContext() + self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schema_with_environment_context_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.table_name is not None: + oprot.writeFieldBegin("table_name", TType.STRING, 2) + oprot.writeString(self.table_name.encode("utf-8") if sys.version_info[0] == 2 else self.table_name) + oprot.writeFieldEnd() + if self.environment_context is not None: + oprot.writeFieldBegin("environment_context", TType.STRUCT, 3) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schema_with_environment_context_args) +get_schema_with_environment_context_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "table_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRUCT, + "environment_context", + [EnvironmentContext, None], + None, + ), # 3 +) + + +class get_schema_with_environment_context_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1303, _size1300) = iprot.readListBegin() + for _i1304 in range(_size1300): + _elem1305 = FieldSchema() + _elem1305.read(iprot) + self.success.append(_elem1305) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = 
UnknownTableException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schema_with_environment_context_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1306 in self.success: + iter1306.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schema_with_environment_context_result) +get_schema_with_environment_context_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [UnknownTableException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [UnknownDBException, None], + None, + ), # 3 +) + + +class get_schema_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetSchemaRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schema_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schema_req_args) +get_schema_req_args.thrift_spec = ( + 
None, # 0 + ( + 1, + TType.STRUCT, + "req", + [GetSchemaRequest, None], + None, + ), # 1 +) + + +class get_schema_req_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetSchemaResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = UnknownTableException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schema_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schema_req_result) +get_schema_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetSchemaResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [UnknownTableException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [UnknownDBException, None], + None, + ), # 3 +) + + +class create_table_args: + """ + Attributes: + - tbl + + """ + + def __init__( + self, + tbl=None, + ): + self.tbl = tbl + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.tbl = Table() + self.tbl.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if 
oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_table_args") + if self.tbl is not None: + oprot.writeFieldBegin("tbl", TType.STRUCT, 1) + self.tbl.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_table_args) +create_table_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "tbl", + [Table, None], + None, + ), # 1 +) + + +class create_table_result: + """ + Attributes: + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_table_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_table_result) +create_table_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + 
[NoSuchObjectException, None], + None, + ), # 4 +) + + +class create_table_with_environment_context_args: + """ + Attributes: + - tbl + - environment_context + + """ + + def __init__( + self, + tbl=None, + environment_context=None, + ): + self.tbl = tbl + self.environment_context = environment_context + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.tbl = Table() + self.tbl.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.environment_context = EnvironmentContext() + self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_table_with_environment_context_args") + if self.tbl is not None: + oprot.writeFieldBegin("tbl", TType.STRUCT, 1) + self.tbl.write(oprot) + oprot.writeFieldEnd() + if self.environment_context is not None: + oprot.writeFieldBegin("environment_context", TType.STRUCT, 2) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_table_with_environment_context_args) +create_table_with_environment_context_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "tbl", + [Table, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "environment_context", + [EnvironmentContext, None], + None, + ), # 2 +) + + +class create_table_with_environment_context_result: + """ + Attributes: + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not 
None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_table_with_environment_context_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_table_with_environment_context_result) +create_table_with_environment_context_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [NoSuchObjectException, None], + None, + ), # 4 +) + + +class create_table_with_constraints_args: + """ + Attributes: + - tbl + - primaryKeys + - foreignKeys + - uniqueConstraints + - notNullConstraints + - defaultConstraints + - checkConstraints + + """ + + def __init__( + self, + tbl=None, + primaryKeys=None, + foreignKeys=None, + uniqueConstraints=None, + notNullConstraints=None, + defaultConstraints=None, + checkConstraints=None, + ): + self.tbl = tbl + self.primaryKeys = primaryKeys + self.foreignKeys = foreignKeys + self.uniqueConstraints = uniqueConstraints + self.notNullConstraints = notNullConstraints + self.defaultConstraints = defaultConstraints + self.checkConstraints = checkConstraints + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.tbl = Table() + self.tbl.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.primaryKeys = [] + (_etype1310, _size1307) = iprot.readListBegin() + for _i1311 in range(_size1307): + _elem1312 = SQLPrimaryKey() + _elem1312.read(iprot) + self.primaryKeys.append(_elem1312) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.foreignKeys = [] + (_etype1316, _size1313) = iprot.readListBegin() + for _i1317 in range(_size1313): + _elem1318 = SQLForeignKey() + _elem1318.read(iprot) + self.foreignKeys.append(_elem1318) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.uniqueConstraints = [] + (_etype1322, _size1319) = iprot.readListBegin() + for _i1323 in range(_size1319): + _elem1324 = SQLUniqueConstraint() + _elem1324.read(iprot) + self.uniqueConstraints.append(_elem1324) + iprot.readListEnd() + else: + 
iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.notNullConstraints = [] + (_etype1328, _size1325) = iprot.readListBegin() + for _i1329 in range(_size1325): + _elem1330 = SQLNotNullConstraint() + _elem1330.read(iprot) + self.notNullConstraints.append(_elem1330) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.defaultConstraints = [] + (_etype1334, _size1331) = iprot.readListBegin() + for _i1335 in range(_size1331): + _elem1336 = SQLDefaultConstraint() + _elem1336.read(iprot) + self.defaultConstraints.append(_elem1336) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.LIST: + self.checkConstraints = [] + (_etype1340, _size1337) = iprot.readListBegin() + for _i1341 in range(_size1337): + _elem1342 = SQLCheckConstraint() + _elem1342.read(iprot) + self.checkConstraints.append(_elem1342) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_table_with_constraints_args") + if self.tbl is not None: + oprot.writeFieldBegin("tbl", TType.STRUCT, 1) + self.tbl.write(oprot) + oprot.writeFieldEnd() + if self.primaryKeys is not None: + oprot.writeFieldBegin("primaryKeys", TType.LIST, 2) + oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) + for iter1343 in self.primaryKeys: + iter1343.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.foreignKeys is not None: + oprot.writeFieldBegin("foreignKeys", TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) + for iter1344 in self.foreignKeys: + iter1344.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.uniqueConstraints is not None: + oprot.writeFieldBegin("uniqueConstraints", TType.LIST, 4) + oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) + for iter1345 in self.uniqueConstraints: + iter1345.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.notNullConstraints is not None: + oprot.writeFieldBegin("notNullConstraints", TType.LIST, 5) + oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) + for iter1346 in self.notNullConstraints: + iter1346.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.defaultConstraints is not None: + oprot.writeFieldBegin("defaultConstraints", TType.LIST, 6) + oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) + for iter1347 in self.defaultConstraints: + iter1347.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.checkConstraints is not None: + oprot.writeFieldBegin("checkConstraints", TType.LIST, 7) + oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) + for iter1348 in self.checkConstraints: + iter1348.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_table_with_constraints_args) 
+create_table_with_constraints_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "tbl", + [Table, None], + None, + ), # 1 + ( + 2, + TType.LIST, + "primaryKeys", + (TType.STRUCT, [SQLPrimaryKey, None], False), + None, + ), # 2 + ( + 3, + TType.LIST, + "foreignKeys", + (TType.STRUCT, [SQLForeignKey, None], False), + None, + ), # 3 + ( + 4, + TType.LIST, + "uniqueConstraints", + (TType.STRUCT, [SQLUniqueConstraint, None], False), + None, + ), # 4 + ( + 5, + TType.LIST, + "notNullConstraints", + (TType.STRUCT, [SQLNotNullConstraint, None], False), + None, + ), # 5 + ( + 6, + TType.LIST, + "defaultConstraints", + (TType.STRUCT, [SQLDefaultConstraint, None], False), + None, + ), # 6 + ( + 7, + TType.LIST, + "checkConstraints", + (TType.STRUCT, [SQLCheckConstraint, None], False), + None, + ), # 7 +) + + +class create_table_with_constraints_result: + """ + Attributes: + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_table_with_constraints_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_table_with_constraints_result) +create_table_with_constraints_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [NoSuchObjectException, None], + 
None, + ), # 4 +) + + +class create_table_req_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = CreateTableRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_table_req_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_table_req_args) +create_table_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [CreateTableRequest, None], + None, + ), # 1 +) + + +class create_table_req_result: + """ + Attributes: + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_table_req_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + 
oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_table_req_result) +create_table_req_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [NoSuchObjectException, None], + None, + ), # 4 +) + + +class drop_constraint_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = DropConstraintRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_constraint_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_constraint_args) +drop_constraint_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [DropConstraintRequest, None], + None, + ), # 1 +) + + +class drop_constraint_result: + """ + Attributes: + - o1 + - o3 + + """ + + def __init__( + self, + o1=None, + o3=None, + ): + self.o1 = o1 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
oprot.writeStructBegin("drop_constraint_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 2) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_constraint_result) +drop_constraint_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 2 +) + + +class add_primary_key_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = AddPrimaryKeyRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_primary_key_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_primary_key_args) +add_primary_key_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [AddPrimaryKeyRequest, None], + None, + ), # 1 +) + + +class add_primary_key_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + 
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_primary_key_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_primary_key_result) +add_primary_key_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class add_foreign_key_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = AddForeignKeyRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_foreign_key_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_foreign_key_args) +add_foreign_key_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [AddForeignKeyRequest, None], + None, + ), # 1 +) + + +class add_foreign_key_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def 
write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_foreign_key_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_foreign_key_result) +add_foreign_key_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class add_unique_constraint_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = AddUniqueConstraintRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_unique_constraint_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_unique_constraint_args) +add_unique_constraint_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [AddUniqueConstraintRequest, None], + None, + ), # 1 +) + + +class add_unique_constraint_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = 
MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_unique_constraint_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_unique_constraint_result) +add_unique_constraint_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class add_not_null_constraint_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = AddNotNullConstraintRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_not_null_constraint_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_not_null_constraint_args) +add_not_null_constraint_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [AddNotNullConstraintRequest, None], + None, + ), # 1 +) + + +class add_not_null_constraint_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 
1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_not_null_constraint_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_not_null_constraint_result) +add_not_null_constraint_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class add_default_constraint_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = AddDefaultConstraintRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_default_constraint_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_default_constraint_args) +add_default_constraint_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [AddDefaultConstraintRequest, None], + None, + ), # 1 +) + + +class add_default_constraint_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, 
[self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_default_constraint_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_default_constraint_result) +add_default_constraint_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class add_check_constraint_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = AddCheckConstraintRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_check_constraint_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_check_constraint_args) +add_check_constraint_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [AddCheckConstraintRequest, None], + None, + ), # 1 +) + + +class add_check_constraint_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode 
is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_check_constraint_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_check_constraint_result) +add_check_constraint_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class translate_table_dryrun_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = CreateTableRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("translate_table_dryrun_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(translate_table_dryrun_args) +translate_table_dryrun_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [CreateTableRequest, None], + None, + ), # 1 +) + + +class translate_table_dryrun_result: + 
""" + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Table() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("translate_table_dryrun_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(translate_table_dryrun_result) +translate_table_dryrun_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Table, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [NoSuchObjectException, None], + None, + ), # 4 +) + + +class drop_table_args: + """ + Attributes: + - dbname + - name + - deleteData + + """ + + def __init__( + self, + dbname=None, + name=None, + deleteData=None, + ): + self.dbname = dbname + self.name = name + self.deleteData = deleteData + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + 
iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_table_args") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 2) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin("deleteData", TType.BOOL, 3) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_table_args) +drop_table_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.BOOL, + "deleteData", + None, + None, + ), # 3 +) + + +class drop_table_result: + """ + Attributes: + - o1 + - o3 + + """ + + def __init__( + self, + o1=None, + o3=None, + ): + self.o1 = o1 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_table_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 2) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + 
def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_table_result) +drop_table_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 2 +) + + +class drop_table_with_environment_context_args: + """ + Attributes: + - dbname + - name + - deleteData + - environment_context + + """ + + def __init__( + self, + dbname=None, + name=None, + deleteData=None, + environment_context=None, + ): + self.dbname = dbname + self.name = name + self.deleteData = deleteData + self.environment_context = environment_context + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.environment_context = EnvironmentContext() + self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_table_with_environment_context_args") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 2) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin("deleteData", TType.BOOL, 3) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + if self.environment_context is not None: + oprot.writeFieldBegin("environment_context", TType.STRUCT, 4) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_table_with_environment_context_args) 
+drop_table_with_environment_context_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.BOOL, + "deleteData", + None, + None, + ), # 3 + ( + 4, + TType.STRUCT, + "environment_context", + [EnvironmentContext, None], + None, + ), # 4 +) + + +class drop_table_with_environment_context_result: + """ + Attributes: + - o1 + - o3 + + """ + + def __init__( + self, + o1=None, + o3=None, + ): + self.o1 = o1 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_table_with_environment_context_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 2) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_table_with_environment_context_result) +drop_table_with_environment_context_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 2 +) + + +class truncate_table_args: + """ + Attributes: + - dbName + - tableName + - partNames + + """ + + def __init__( + self, + dbName=None, + tableName=None, + partNames=None, + ): + self.dbName = dbName + self.tableName = tableName + self.partNames = partNames + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.partNames = [] + (_etype1352, _size1349) = 
iprot.readListBegin() + for _i1353 in range(_size1349): + _elem1354 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partNames.append(_elem1354) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("truncate_table_args") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 2) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.partNames is not None: + oprot.writeFieldBegin("partNames", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.partNames)) + for iter1355 in self.partNames: + oprot.writeString(iter1355.encode("utf-8") if sys.version_info[0] == 2 else iter1355) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(truncate_table_args) +truncate_table_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "partNames", + (TType.STRING, "UTF8", False), + None, + ), # 3 +) + + +class truncate_table_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("truncate_table_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(truncate_table_result) 
+truncate_table_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class truncate_table_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = TruncateTableRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("truncate_table_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(truncate_table_req_args) +truncate_table_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [TruncateTableRequest, None], + None, + ), # 1 +) + + +class truncate_table_req_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = TruncateTableResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("truncate_table_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, 
other): + return not (self == other) + + +all_structs.append(truncate_table_req_result) +truncate_table_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [TruncateTableResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_tables_args: + """ + Attributes: + - db_name + - pattern + + """ + + def __init__( + self, + db_name=None, + pattern=None, + ): + self.db_name = db_name + self.pattern = pattern + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.pattern = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_tables_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.pattern is not None: + oprot.writeFieldBegin("pattern", TType.STRING, 2) + oprot.writeString(self.pattern.encode("utf-8") if sys.version_info[0] == 2 else self.pattern) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_tables_args) +get_tables_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "pattern", + "UTF8", + None, + ), # 2 +) + + +class get_tables_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1359, _size1356) = iprot.readListBegin() + for _i1360 in range(_size1356): + _elem1361 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1361) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == 
TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_tables_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1362 in self.success: + oprot.writeString(iter1362.encode("utf-8") if sys.version_info[0] == 2 else iter1362) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_tables_result) +get_tables_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_tables_by_type_args: + """ + Attributes: + - db_name + - pattern + - tableType + + """ + + def __init__( + self, + db_name=None, + pattern=None, + tableType=None, + ): + self.db_name = db_name + self.pattern = pattern + self.tableType = tableType + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.pattern = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableType = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_tables_by_type_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.pattern is not None: + oprot.writeFieldBegin("pattern", TType.STRING, 2) + oprot.writeString(self.pattern.encode("utf-8") if sys.version_info[0] == 2 else self.pattern) + oprot.writeFieldEnd() + if self.tableType is not None: + oprot.writeFieldBegin("tableType", TType.STRING, 3) + 
oprot.writeString(self.tableType.encode("utf-8") if sys.version_info[0] == 2 else self.tableType) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_tables_by_type_args) +get_tables_by_type_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "pattern", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tableType", + "UTF8", + None, + ), # 3 +) + + +class get_tables_by_type_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1366, _size1363) = iprot.readListBegin() + for _i1367 in range(_size1363): + _elem1368 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1368) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_tables_by_type_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1369 in self.success: + oprot.writeString(iter1369.encode("utf-8") if sys.version_info[0] == 2 else iter1369) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_tables_by_type_result) +get_tables_by_type_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_all_materialized_view_objects_for_rewriting_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, 
self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_materialized_view_objects_for_rewriting_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_materialized_view_objects_for_rewriting_args) +get_all_materialized_view_objects_for_rewriting_args.thrift_spec = () + + +class get_all_materialized_view_objects_for_rewriting_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1373, _size1370) = iprot.readListBegin() + for _i1374 in range(_size1370): + _elem1375 = Table() + _elem1375.read(iprot) + self.success.append(_elem1375) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_materialized_view_objects_for_rewriting_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1376 in self.success: + iter1376.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_materialized_view_objects_for_rewriting_result) +get_all_materialized_view_objects_for_rewriting_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [Table, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_materialized_views_for_rewriting_args: + """ + Attributes: + - db_name + 
+ """ + + def __init__( + self, + db_name=None, + ): + self.db_name = db_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_materialized_views_for_rewriting_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_materialized_views_for_rewriting_args) +get_materialized_views_for_rewriting_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 +) + + +class get_materialized_views_for_rewriting_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1380, _size1377) = iprot.readListBegin() + for _i1381 in range(_size1377): + _elem1382 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1382) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_materialized_views_for_rewriting_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1383 in self.success: + oprot.writeString(iter1383.encode("utf-8") if sys.version_info[0] == 2 else iter1383) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 
1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_materialized_views_for_rewriting_result) +get_materialized_views_for_rewriting_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_table_meta_args: + """ + Attributes: + - db_patterns + - tbl_patterns + - tbl_types + + """ + + def __init__( + self, + db_patterns=None, + tbl_patterns=None, + tbl_types=None, + ): + self.db_patterns = db_patterns + self.tbl_patterns = tbl_patterns + self.tbl_types = tbl_types + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_patterns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_patterns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.tbl_types = [] + (_etype1387, _size1384) = iprot.readListBegin() + for _i1388 in range(_size1384): + _elem1389 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.tbl_types.append(_elem1389) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_meta_args") + if self.db_patterns is not None: + oprot.writeFieldBegin("db_patterns", TType.STRING, 1) + oprot.writeString(self.db_patterns.encode("utf-8") if sys.version_info[0] == 2 else self.db_patterns) + oprot.writeFieldEnd() + if self.tbl_patterns is not None: + oprot.writeFieldBegin("tbl_patterns", TType.STRING, 2) + oprot.writeString(self.tbl_patterns.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_patterns) + oprot.writeFieldEnd() + if self.tbl_types is not None: + oprot.writeFieldBegin("tbl_types", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.tbl_types)) + for iter1390 in self.tbl_types: + oprot.writeString(iter1390.encode("utf-8") if sys.version_info[0] == 2 else iter1390) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return 
isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_meta_args) +get_table_meta_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_patterns", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_patterns", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "tbl_types", + (TType.STRING, "UTF8", False), + None, + ), # 3 +) + + +class get_table_meta_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1394, _size1391) = iprot.readListBegin() + for _i1395 in range(_size1391): + _elem1396 = TableMeta() + _elem1396.read(iprot) + self.success.append(_elem1396) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_meta_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1397 in self.success: + iter1397.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_meta_result) +get_table_meta_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [TableMeta, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_all_tables_args: + """ + Attributes: + - db_name + + """ + + def __init__( + self, + db_name=None, + ): + self.db_name = db_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + 
if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_tables_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_tables_args) +get_all_tables_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 +) + + +class get_all_tables_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1401, _size1398) = iprot.readListBegin() + for _i1402 in range(_size1398): + _elem1403 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1403) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_tables_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1404 in self.success: + oprot.writeString(iter1404.encode("utf-8") if sys.version_info[0] == 2 else iter1404) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_tables_result) +get_all_tables_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_table_args: + """ + Attributes: + - dbname + - tbl_name + + """ + + def __init__( + self, + dbname=None, + tbl_name=None, + ): + 
self.dbname = dbname + self.tbl_name = tbl_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_args") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_args) +get_table_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 +) + + +class get_table_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Table() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is 
not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_result) +get_table_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Table, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_table_objects_by_name_args: + """ + Attributes: + - dbname + - tbl_names + + """ + + def __init__( + self, + dbname=None, + tbl_names=None, + ): + self.dbname = dbname + self.tbl_names = tbl_names + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.tbl_names = [] + (_etype1408, _size1405) = iprot.readListBegin() + for _i1409 in range(_size1405): + _elem1410 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.tbl_names.append(_elem1410) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_objects_by_name_args") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tbl_names is not None: + oprot.writeFieldBegin("tbl_names", TType.LIST, 2) + oprot.writeListBegin(TType.STRING, len(self.tbl_names)) + for iter1411 in self.tbl_names: + oprot.writeString(iter1411.encode("utf-8") if sys.version_info[0] == 2 else iter1411) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_objects_by_name_args) +get_table_objects_by_name_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.LIST, + 
"tbl_names", + (TType.STRING, "UTF8", False), + None, + ), # 2 +) + + +class get_table_objects_by_name_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1415, _size1412) = iprot.readListBegin() + for _i1416 in range(_size1412): + _elem1417 = Table() + _elem1417.read(iprot) + self.success.append(_elem1417) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_objects_by_name_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1418 in self.success: + iter1418.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_objects_by_name_result) +get_table_objects_by_name_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [Table, None], False), + None, + ), # 0 +) + + +class get_tables_ext_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetTablesExtRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_tables_ext_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + 
+all_structs.append(get_tables_ext_args) +get_tables_ext_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [GetTablesExtRequest, None], + None, + ), # 1 +) + + +class get_tables_ext_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1422, _size1419) = iprot.readListBegin() + for _i1423 in range(_size1419): + _elem1424 = ExtendedTableInfo() + _elem1424.read(iprot) + self.success.append(_elem1424) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_tables_ext_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1425 in self.success: + iter1425.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_tables_ext_result) +get_tables_ext_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [ExtendedTableInfo, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_table_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetTableRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() 
+ oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_req_args) +get_table_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [GetTableRequest, None], + None, + ), # 1 +) + + +class get_table_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetTableResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_req_result) +get_table_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetTableResult, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_table_objects_by_name_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetTablesRequest() + self.req.read(iprot) + else: + 
iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_objects_by_name_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_objects_by_name_req_args) +get_table_objects_by_name_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [GetTablesRequest, None], + None, + ), # 1 +) + + +class get_table_objects_by_name_req_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetTablesResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_objects_by_name_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_objects_by_name_req_result) +get_table_objects_by_name_req_result.thrift_spec = ( + 
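+    # Each thrift_spec entry is (field id, wire type, field name, nested type
+    # spec, default value); the tuple is indexed by field id, so unused ids
+    # hold None placeholders and the _fast_encode/_fast_decode extension can
+    # walk the table directly.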
( + 0, + TType.STRUCT, + "success", + [GetTablesResult, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [UnknownDBException, None], + None, + ), # 3 +) + + +class get_materialization_invalidation_info_args: + """ + Attributes: + - creation_metadata + - validTxnList + + """ + + def __init__( + self, + creation_metadata=None, + validTxnList=None, + ): + self.creation_metadata = creation_metadata + self.validTxnList = validTxnList + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.creation_metadata = CreationMetadata() + self.creation_metadata.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.validTxnList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_materialization_invalidation_info_args") + if self.creation_metadata is not None: + oprot.writeFieldBegin("creation_metadata", TType.STRUCT, 1) + self.creation_metadata.write(oprot) + oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin("validTxnList", TType.STRING, 2) + oprot.writeString(self.validTxnList.encode("utf-8") if sys.version_info[0] == 2 else self.validTxnList) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_materialization_invalidation_info_args) +get_materialization_invalidation_info_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "creation_metadata", + [CreationMetadata, None], + None, + ), # 1 + ( + 2, + TType.STRING, + "validTxnList", + "UTF8", + None, + ), # 2 +) + + +class get_materialization_invalidation_info_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Materialization() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif 
fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_materialization_invalidation_info_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_materialization_invalidation_info_result) +get_materialization_invalidation_info_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Materialization, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [UnknownDBException, None], + None, + ), # 3 +) + + +class update_creation_metadata_args: + """ + Attributes: + - catName + - dbname + - tbl_name + - creation_metadata + + """ + + def __init__( + self, + catName=None, + dbname=None, + tbl_name=None, + creation_metadata=None, + ): + self.catName = catName + self.dbname = dbname + self.tbl_name = tbl_name + self.creation_metadata = creation_metadata + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.creation_metadata = CreationMetadata() + self.creation_metadata.read(iprot) + else: + iprot.skip(ftype) + else: + 
iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_creation_metadata_args") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 2) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 3) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.creation_metadata is not None: + oprot.writeFieldBegin("creation_metadata", TType.STRUCT, 4) + self.creation_metadata.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_creation_metadata_args) +update_creation_metadata_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRUCT, + "creation_metadata", + [CreationMetadata, None], + None, + ), # 4 +) + + +class update_creation_metadata_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_creation_metadata_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + 
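+        # Declared service exceptions (o1..o3) travel back as ordinary
+        # optional fields of this generated *_result struct; the client stub
+        # re-raises whichever one arrives populated.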
oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_creation_metadata_result) +update_creation_metadata_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [UnknownDBException, None], + None, + ), # 3 +) + + +class get_table_names_by_filter_args: + """ + Attributes: + - dbname + - filter + - max_tables + + """ + + def __init__( + self, + dbname=None, + filter=None, + max_tables=-1, + ): + self.dbname = dbname + self.filter = filter + self.max_tables = max_tables + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.filter = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I16: + self.max_tables = iprot.readI16() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_names_by_filter_args") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.filter is not None: + oprot.writeFieldBegin("filter", TType.STRING, 2) + oprot.writeString(self.filter.encode("utf-8") if sys.version_info[0] == 2 else self.filter) + oprot.writeFieldEnd() + if self.max_tables is not None: + oprot.writeFieldBegin("max_tables", TType.I16, 3) + oprot.writeI16(self.max_tables) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_names_by_filter_args) +get_table_names_by_filter_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "filter", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I16, + "max_tables", + None, + -1, + ), # 3 +) + + +class get_table_names_by_filter_result: + """ + Attributes: + - success + 
- o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1429, _size1426) = iprot.readListBegin() + for _i1430 in range(_size1426): + _elem1431 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1431) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_names_by_filter_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1432 in self.success: + oprot.writeString(iter1432.encode("utf-8") if sys.version_info[0] == 2 else iter1432) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_names_by_filter_result) +get_table_names_by_filter_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [UnknownDBException, None], + None, + ), # 3 +) + + +class alter_table_args: + """ + Attributes: + - dbname + - tbl_name + - new_tbl + + """ + + def __init__( + self, + dbname=None, + tbl_name=None, + new_tbl=None, + ): + self.dbname = dbname + self.tbl_name = tbl_name + self.new_tbl = new_tbl + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, 
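+                # Fast path: the accelerated C decoder fills in the whole
+                # struct in a single call, driven by the thrift_spec table.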
[self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.new_tbl = Table() + self.new_tbl.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_table_args") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.new_tbl is not None: + oprot.writeFieldBegin("new_tbl", TType.STRUCT, 3) + self.new_tbl.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_table_args) +alter_table_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRUCT, + "new_tbl", + [Table, None], + None, + ), # 3 +) + + +class alter_table_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_table_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + 
oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_table_result) +alter_table_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidOperationException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class alter_table_with_environment_context_args: + """ + Attributes: + - dbname + - tbl_name + - new_tbl + - environment_context + + """ + + def __init__( + self, + dbname=None, + tbl_name=None, + new_tbl=None, + environment_context=None, + ): + self.dbname = dbname + self.tbl_name = tbl_name + self.new_tbl = new_tbl + self.environment_context = environment_context + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.new_tbl = Table() + self.new_tbl.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.environment_context = EnvironmentContext() + self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_table_with_environment_context_args") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.new_tbl is not None: + oprot.writeFieldBegin("new_tbl", TType.STRUCT, 3) + self.new_tbl.write(oprot) + oprot.writeFieldEnd() + if self.environment_context is not None: + oprot.writeFieldBegin("environment_context", TType.STRUCT, 4) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, 
other): + return not (self == other) + + +all_structs.append(alter_table_with_environment_context_args) +alter_table_with_environment_context_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRUCT, + "new_tbl", + [Table, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "environment_context", + [EnvironmentContext, None], + None, + ), # 4 +) + + +class alter_table_with_environment_context_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_table_with_environment_context_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_table_with_environment_context_result) +alter_table_with_environment_context_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidOperationException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class alter_table_with_cascade_args: + """ + Attributes: + - dbname + - tbl_name + - new_tbl + - cascade + + """ + + def __init__( + self, + dbname=None, + tbl_name=None, + new_tbl=None, + cascade=None, + ): + self.dbname = dbname + self.tbl_name = tbl_name + self.new_tbl = new_tbl + self.cascade = cascade + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", 
errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.new_tbl = Table() + self.new_tbl.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.cascade = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_table_with_cascade_args") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.new_tbl is not None: + oprot.writeFieldBegin("new_tbl", TType.STRUCT, 3) + self.new_tbl.write(oprot) + oprot.writeFieldEnd() + if self.cascade is not None: + oprot.writeFieldBegin("cascade", TType.BOOL, 4) + oprot.writeBool(self.cascade) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_table_with_cascade_args) +alter_table_with_cascade_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRUCT, + "new_tbl", + [Table, None], + None, + ), # 3 + ( + 4, + TType.BOOL, + "cascade", + None, + None, + ), # 4 +) + + +class alter_table_with_cascade_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_table_with_cascade_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def 
__repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_table_with_cascade_result) +alter_table_with_cascade_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidOperationException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class alter_table_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = AlterTableRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_table_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_table_req_args) +alter_table_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [AlterTableRequest, None], + None, + ), # 1 +) + + +class alter_table_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = AlterTableResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_table_req_result") + if self.success is not 
None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_table_req_result) +alter_table_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [AlterTableResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidOperationException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class add_partition_args: + """ + Attributes: + - new_part + + """ + + def __init__( + self, + new_part=None, + ): + self.new_part = new_part + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.new_part = Partition() + self.new_part.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_partition_args") + if self.new_part is not None: + oprot.writeFieldBegin("new_part", TType.STRUCT, 1) + self.new_part.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_partition_args) +add_partition_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "new_part", + [Partition, None], + None, + ), # 1 +) + + +class add_partition_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Partition() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == 
TType.STRUCT: + self.o1 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_partition_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_partition_result) +add_partition_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Partition, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [AlreadyExistsException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class add_partition_with_environment_context_args: + """ + Attributes: + - new_part + - environment_context + + """ + + def __init__( + self, + new_part=None, + environment_context=None, + ): + self.new_part = new_part + self.environment_context = environment_context + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.new_part = Partition() + self.new_part.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.environment_context = EnvironmentContext() + self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_partition_with_environment_context_args") + if self.new_part is not None: + oprot.writeFieldBegin("new_part", TType.STRUCT, 1) + self.new_part.write(oprot) + oprot.writeFieldEnd() + if self.environment_context is not None: + oprot.writeFieldBegin("environment_context", TType.STRUCT, 2) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + 
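+        # Client stubs pack an RPC call's parameters into these generated
+        # *_args structs before handing them to the protocol layer.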
oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_partition_with_environment_context_args) +add_partition_with_environment_context_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "new_part", + [Partition, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "environment_context", + [EnvironmentContext, None], + None, + ), # 2 +) + + +class add_partition_with_environment_context_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Partition() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_partition_with_environment_context_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_partition_with_environment_context_result) +add_partition_with_environment_context_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Partition, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [AlreadyExistsException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class 
add_partitions_args: + """ + Attributes: + - new_parts + + """ + + def __init__( + self, + new_parts=None, + ): + self.new_parts = new_parts + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.new_parts = [] + (_etype1436, _size1433) = iprot.readListBegin() + for _i1437 in range(_size1433): + _elem1438 = Partition() + _elem1438.read(iprot) + self.new_parts.append(_elem1438) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_partitions_args") + if self.new_parts is not None: + oprot.writeFieldBegin("new_parts", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) + for iter1439 in self.new_parts: + iter1439.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_partitions_args) +add_partitions_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "new_parts", + (TType.STRUCT, [Partition, None], False), + None, + ), # 1 +) + + +class add_partitions_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.I32: + self.success = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_partitions_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.I32, 0) + oprot.writeI32(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + 
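+            # Nested struct fields serialize themselves recursively via their
+            # own write() method.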
self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_partitions_result) +add_partitions_result.thrift_spec = ( + ( + 0, + TType.I32, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [AlreadyExistsException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class add_partitions_pspec_args: + """ + Attributes: + - new_parts + + """ + + def __init__( + self, + new_parts=None, + ): + self.new_parts = new_parts + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.new_parts = [] + (_etype1443, _size1440) = iprot.readListBegin() + for _i1444 in range(_size1440): + _elem1445 = PartitionSpec() + _elem1445.read(iprot) + self.new_parts.append(_elem1445) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_partitions_pspec_args") + if self.new_parts is not None: + oprot.writeFieldBegin("new_parts", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) + for iter1446 in self.new_parts: + iter1446.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_partitions_pspec_args) +add_partitions_pspec_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "new_parts", + (TType.STRUCT, [PartitionSpec, None], False), + None, + ), # 1 +) + + +class add_partitions_pspec_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, 
[self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.I32: + self.success = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_partitions_pspec_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.I32, 0) + oprot.writeI32(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_partitions_pspec_result) +add_partitions_pspec_result.thrift_spec = ( + ( + 0, + TType.I32, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [AlreadyExistsException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class append_partition_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_vals=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype1450, _size1447) = iprot.readListBegin() + for _i1451 in range(_size1447): + _elem1452 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else 
iprot.readString() + ) + self.part_vals.append(_elem1452) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("append_partition_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter1453 in self.part_vals: + oprot.writeString(iter1453.encode("utf-8") if sys.version_info[0] == 2 else iter1453) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(append_partition_args) +append_partition_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "part_vals", + (TType.STRING, "UTF8", False), + None, + ), # 3 +) + + +class append_partition_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Partition() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("append_partition_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 
is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(append_partition_result) +append_partition_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Partition, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [AlreadyExistsException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class add_partitions_req_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = AddPartitionsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_partitions_req_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_partitions_req_args) +add_partitions_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [AddPartitionsRequest, None], + None, + ), # 1 +) + + +class add_partitions_req_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = AddPartitionsResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == 
TType.STRUCT: + self.o1 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_partitions_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_partitions_req_result) +add_partitions_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [AddPartitionsResult, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [AlreadyExistsException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class append_partition_with_environment_context_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + - environment_context + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_vals=None, + environment_context=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.environment_context = environment_context + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype1457, _size1454) = iprot.readListBegin() + for _i1458 in range(_size1454): + _elem1459 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.part_vals.append(_elem1459) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + 
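+                    # field 4: EnvironmentContext attached to the append request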
self.environment_context = EnvironmentContext() + self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("append_partition_with_environment_context_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter1460 in self.part_vals: + oprot.writeString(iter1460.encode("utf-8") if sys.version_info[0] == 2 else iter1460) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.environment_context is not None: + oprot.writeFieldBegin("environment_context", TType.STRUCT, 4) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(append_partition_with_environment_context_args) +append_partition_with_environment_context_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "part_vals", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.STRUCT, + "environment_context", + [EnvironmentContext, None], + None, + ), # 4 +) + + +class append_partition_with_environment_context_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Partition() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + 
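+            # fast path: hand the whole struct to the accelerated C encoder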
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("append_partition_with_environment_context_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(append_partition_with_environment_context_result) +append_partition_with_environment_context_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Partition, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [AlreadyExistsException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class append_partition_by_name_args: + """ + Attributes: + - db_name + - tbl_name + - part_name + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_name=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_name = part_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.part_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("append_partition_by_name_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_name is not None: + 
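+            # field 3: part_name identifies the partition by its name string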
oprot.writeFieldBegin("part_name", TType.STRING, 3) + oprot.writeString(self.part_name.encode("utf-8") if sys.version_info[0] == 2 else self.part_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(append_partition_by_name_args) +append_partition_by_name_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "part_name", + "UTF8", + None, + ), # 3 +) + + +class append_partition_by_name_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Partition() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("append_partition_by_name_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(append_partition_by_name_result) +append_partition_by_name_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Partition, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + 
[AlreadyExistsException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class append_partition_by_name_with_environment_context_args: + """ + Attributes: + - db_name + - tbl_name + - part_name + - environment_context + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_name=None, + environment_context=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_name = part_name + self.environment_context = environment_context + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.part_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.environment_context = EnvironmentContext() + self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("append_partition_by_name_with_environment_context_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_name is not None: + oprot.writeFieldBegin("part_name", TType.STRING, 3) + oprot.writeString(self.part_name.encode("utf-8") if sys.version_info[0] == 2 else self.part_name) + oprot.writeFieldEnd() + if self.environment_context is not None: + oprot.writeFieldBegin("environment_context", TType.STRUCT, 4) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(append_partition_by_name_with_environment_context_args) +append_partition_by_name_with_environment_context_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "part_name", + 
"UTF8", + None, + ), # 3 + ( + 4, + TType.STRUCT, + "environment_context", + [EnvironmentContext, None], + None, + ), # 4 +) + + +class append_partition_by_name_with_environment_context_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Partition() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("append_partition_by_name_with_environment_context_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(append_partition_by_name_with_environment_context_result) +append_partition_by_name_with_environment_context_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Partition, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [AlreadyExistsException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class drop_partition_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + - deleteData + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_vals=None, + deleteData=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.deleteData = deleteData + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + 
return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype1464, _size1461) = iprot.readListBegin() + for _i1465 in range(_size1461): + _elem1466 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.part_vals.append(_elem1466) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_partition_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter1467 in self.part_vals: + oprot.writeString(iter1467.encode("utf-8") if sys.version_info[0] == 2 else iter1467) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin("deleteData", TType.BOOL, 4) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_partition_args) +drop_partition_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "part_vals", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.BOOL, + "deleteData", + None, + None, + ), # 4 +) + + +class drop_partition_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + 
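+                # a STOP field terminates the struct; nothing left to read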
break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_partition_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_partition_result) +drop_partition_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class drop_partition_with_environment_context_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + - deleteData + - environment_context + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_vals=None, + deleteData=None, + environment_context=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.deleteData = deleteData + self.environment_context = environment_context + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype1471, _size1468) = iprot.readListBegin() + for _i1472 in range(_size1468): + _elem1473 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.part_vals.append(_elem1473) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.environment_context = EnvironmentContext() + 
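+                        # field 5: optional EnvironmentContext, decoded in place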
self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_partition_with_environment_context_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter1474 in self.part_vals: + oprot.writeString(iter1474.encode("utf-8") if sys.version_info[0] == 2 else iter1474) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin("deleteData", TType.BOOL, 4) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + if self.environment_context is not None: + oprot.writeFieldBegin("environment_context", TType.STRUCT, 5) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_partition_with_environment_context_args) +drop_partition_with_environment_context_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "part_vals", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.BOOL, + "deleteData", + None, + None, + ), # 4 + ( + 5, + TType.STRUCT, + "environment_context", + [EnvironmentContext, None], + None, + ), # 5 +) + + +class drop_partition_with_environment_context_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, 
self.thrift_spec])) + return + oprot.writeStructBegin("drop_partition_with_environment_context_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_partition_with_environment_context_result) +drop_partition_with_environment_context_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class drop_partition_by_name_args: + """ + Attributes: + - db_name + - tbl_name + - part_name + - deleteData + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_name=None, + deleteData=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_name = part_name + self.deleteData = deleteData + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.part_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_partition_by_name_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_name is not None: + oprot.writeFieldBegin("part_name", TType.STRING, 3) + oprot.writeString(self.part_name.encode("utf-8") if 
sys.version_info[0] == 2 else self.part_name) + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin("deleteData", TType.BOOL, 4) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_partition_by_name_args) +drop_partition_by_name_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "part_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.BOOL, + "deleteData", + None, + None, + ), # 4 +) + + +class drop_partition_by_name_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_partition_by_name_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_partition_by_name_result) +drop_partition_by_name_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class drop_partition_by_name_with_environment_context_args: + """ + Attributes: + - db_name + - tbl_name + - part_name + - deleteData + - environment_context + + """ + + def __init__( + 
self, + db_name=None, + tbl_name=None, + part_name=None, + deleteData=None, + environment_context=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_name = part_name + self.deleteData = deleteData + self.environment_context = environment_context + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.part_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.environment_context = EnvironmentContext() + self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_partition_by_name_with_environment_context_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_name is not None: + oprot.writeFieldBegin("part_name", TType.STRING, 3) + oprot.writeString(self.part_name.encode("utf-8") if sys.version_info[0] == 2 else self.part_name) + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin("deleteData", TType.BOOL, 4) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + if self.environment_context is not None: + oprot.writeFieldBegin("environment_context", TType.STRUCT, 5) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_partition_by_name_with_environment_context_args) +drop_partition_by_name_with_environment_context_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "part_name", + 
"UTF8", + None, + ), # 3 + ( + 4, + TType.BOOL, + "deleteData", + None, + None, + ), # 4 + ( + 5, + TType.STRUCT, + "environment_context", + [EnvironmentContext, None], + None, + ), # 5 +) + + +class drop_partition_by_name_with_environment_context_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_partition_by_name_with_environment_context_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_partition_by_name_with_environment_context_result) +drop_partition_by_name_with_environment_context_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class drop_partitions_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = DropPartitionsRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
oprot.writeStructBegin("drop_partitions_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_partitions_req_args) +drop_partitions_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [DropPartitionsRequest, None], + None, + ), # 1 +) + + +class drop_partitions_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = DropPartitionsResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_partitions_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_partitions_req_result) +drop_partitions_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [DropPartitionsResult, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_partition_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_vals=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, 
TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype1478, _size1475) = iprot.readListBegin() + for _i1479 in range(_size1475): + _elem1480 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.part_vals.append(_elem1480) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter1481 in self.part_vals: + oprot.writeString(iter1481.encode("utf-8") if sys.version_info[0] == 2 else iter1481) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_args) +get_partition_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "part_vals", + (TType.STRING, "UTF8", False), + None, + ), # 3 +) + + +class get_partition_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Partition() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + 
self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_result) +get_partition_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Partition, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partition_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetPartitionRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_req_args) +get_partition_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [GetPartitionRequest, None], + None, + ), # 1 +) + + +class get_partition_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + 
and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetPartitionResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_req_result) +get_partition_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetPartitionResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class exchange_partition_args: + """ + Attributes: + - partitionSpecs + - source_db + - source_table_name + - dest_db + - dest_table_name + + """ + + def __init__( + self, + partitionSpecs=None, + source_db=None, + source_table_name=None, + dest_db=None, + dest_table_name=None, + ): + self.partitionSpecs = partitionSpecs + self.source_db = source_db + self.source_table_name = source_table_name + self.dest_db = dest_db + self.dest_table_name = dest_table_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.MAP: + self.partitionSpecs = {} + (_ktype1483, _vtype1484, _size1482) = iprot.readMapBegin() + for _i1486 in range(_size1482): + _key1487 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val1488 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partitionSpecs[_key1487] = _val1488 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.source_db = ( + 
iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.source_table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.dest_db = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.dest_table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("exchange_partition_args") + if self.partitionSpecs is not None: + oprot.writeFieldBegin("partitionSpecs", TType.MAP, 1) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) + for kiter1489, viter1490 in self.partitionSpecs.items(): + oprot.writeString(kiter1489.encode("utf-8") if sys.version_info[0] == 2 else kiter1489) + oprot.writeString(viter1490.encode("utf-8") if sys.version_info[0] == 2 else viter1490) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.source_db is not None: + oprot.writeFieldBegin("source_db", TType.STRING, 2) + oprot.writeString(self.source_db.encode("utf-8") if sys.version_info[0] == 2 else self.source_db) + oprot.writeFieldEnd() + if self.source_table_name is not None: + oprot.writeFieldBegin("source_table_name", TType.STRING, 3) + oprot.writeString(self.source_table_name.encode("utf-8") if sys.version_info[0] == 2 else self.source_table_name) + oprot.writeFieldEnd() + if self.dest_db is not None: + oprot.writeFieldBegin("dest_db", TType.STRING, 4) + oprot.writeString(self.dest_db.encode("utf-8") if sys.version_info[0] == 2 else self.dest_db) + oprot.writeFieldEnd() + if self.dest_table_name is not None: + oprot.writeFieldBegin("dest_table_name", TType.STRING, 5) + oprot.writeString(self.dest_table_name.encode("utf-8") if sys.version_info[0] == 2 else self.dest_table_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(exchange_partition_args) +exchange_partition_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.MAP, + "partitionSpecs", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 1 + ( + 2, + TType.STRING, + "source_db", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "source_table_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "dest_db", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "dest_table_name", + "UTF8", + None, + ), # 5 +) + + +class exchange_partition_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = 
success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Partition() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("exchange_partition_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(exchange_partition_result) +exchange_partition_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Partition, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [InvalidObjectException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [InvalidInputException, None], + None, + ), # 4 +) + + +class exchange_partitions_args: + """ + Attributes: + - partitionSpecs + - source_db + - source_table_name + - dest_db + - dest_table_name + + """ + + def __init__( + self, + partitionSpecs=None, + source_db=None, + source_table_name=None, + dest_db=None, + dest_table_name=None, + ): + self.partitionSpecs = partitionSpecs + self.source_db = source_db + self.source_table_name = source_table_name + self.dest_db = dest_db + self.dest_table_name = dest_table_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, 
[self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.MAP: + self.partitionSpecs = {} + (_ktype1492, _vtype1493, _size1491) = iprot.readMapBegin() + for _i1495 in range(_size1491): + _key1496 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val1497 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partitionSpecs[_key1496] = _val1497 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.source_db = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.source_table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.dest_db = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.dest_table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("exchange_partitions_args") + if self.partitionSpecs is not None: + oprot.writeFieldBegin("partitionSpecs", TType.MAP, 1) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) + for kiter1498, viter1499 in self.partitionSpecs.items(): + oprot.writeString(kiter1498.encode("utf-8") if sys.version_info[0] == 2 else kiter1498) + oprot.writeString(viter1499.encode("utf-8") if sys.version_info[0] == 2 else viter1499) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.source_db is not None: + oprot.writeFieldBegin("source_db", TType.STRING, 2) + oprot.writeString(self.source_db.encode("utf-8") if sys.version_info[0] == 2 else self.source_db) + oprot.writeFieldEnd() + if self.source_table_name is not None: + oprot.writeFieldBegin("source_table_name", TType.STRING, 3) + oprot.writeString(self.source_table_name.encode("utf-8") if sys.version_info[0] == 2 else self.source_table_name) + oprot.writeFieldEnd() + if self.dest_db is not None: + oprot.writeFieldBegin("dest_db", TType.STRING, 4) + oprot.writeString(self.dest_db.encode("utf-8") if sys.version_info[0] == 2 else self.dest_db) + oprot.writeFieldEnd() + if self.dest_table_name is not None: + oprot.writeFieldBegin("dest_table_name", TType.STRING, 5) + oprot.writeString(self.dest_table_name.encode("utf-8") if sys.version_info[0] == 2 else self.dest_table_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, 
other): + return not (self == other) + + +all_structs.append(exchange_partitions_args) +exchange_partitions_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.MAP, + "partitionSpecs", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 1 + ( + 2, + TType.STRING, + "source_db", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "source_table_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "dest_db", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "dest_table_name", + "UTF8", + None, + ), # 5 +) + + +class exchange_partitions_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1503, _size1500) = iprot.readListBegin() + for _i1504 in range(_size1500): + _elem1505 = Partition() + _elem1505.read(iprot) + self.success.append(_elem1505) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("exchange_partitions_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1506 in self.success: + iter1506.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(exchange_partitions_result) +exchange_partitions_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [Partition, 
None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [InvalidObjectException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [InvalidInputException, None], + None, + ), # 4 +) + + +class get_partition_with_auth_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + - user_name + - group_names + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_vals=None, + user_name=None, + group_names=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.user_name = user_name + self.group_names = group_names + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype1510, _size1507) = iprot.readListBegin() + for _i1511 in range(_size1507): + _elem1512 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.part_vals.append(_elem1512) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.user_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.group_names = [] + (_etype1516, _size1513) = iprot.readListBegin() + for _i1517 in range(_size1513): + _elem1518 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.group_names.append(_elem1518) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_with_auth_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter1519 in self.part_vals: + oprot.writeString(iter1519.encode("utf-8") if sys.version_info[0] == 2 else iter1519) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.user_name is not None: + 
oprot.writeFieldBegin("user_name", TType.STRING, 4) + oprot.writeString(self.user_name.encode("utf-8") if sys.version_info[0] == 2 else self.user_name) + oprot.writeFieldEnd() + if self.group_names is not None: + oprot.writeFieldBegin("group_names", TType.LIST, 5) + oprot.writeListBegin(TType.STRING, len(self.group_names)) + for iter1520 in self.group_names: + oprot.writeString(iter1520.encode("utf-8") if sys.version_info[0] == 2 else iter1520) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_with_auth_args) +get_partition_with_auth_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "part_vals", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.STRING, + "user_name", + "UTF8", + None, + ), # 4 + ( + 5, + TType.LIST, + "group_names", + (TType.STRING, "UTF8", False), + None, + ), # 5 +) + + +class get_partition_with_auth_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Partition() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_with_auth_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_with_auth_result) 
+get_partition_with_auth_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Partition, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partition_by_name_args: + """ + Attributes: + - db_name + - tbl_name + - part_name + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_name=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_name = part_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.part_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_by_name_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_name is not None: + oprot.writeFieldBegin("part_name", TType.STRING, 3) + oprot.writeString(self.part_name.encode("utf-8") if sys.version_info[0] == 2 else self.part_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_by_name_args) +get_partition_by_name_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "part_name", + "UTF8", + None, + ), # 3 +) + + +class get_partition_by_name_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and 
self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Partition() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_by_name_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_by_name_result) +get_partition_by_name_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Partition, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partitions_args: + """ + Attributes: + - db_name + - tbl_name + - max_parts + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + max_parts=-1, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.max_parts = max_parts + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I16: + self.max_parts = iprot.readI16() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", 
TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.max_parts is not None: + oprot.writeFieldBegin("max_parts", TType.I16, 3) + oprot.writeI16(self.max_parts) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_args) +get_partitions_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I16, + "max_parts", + None, + -1, + ), # 3 +) + + +class get_partitions_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1524, _size1521) = iprot.readListBegin() + for _i1525 in range(_size1521): + _elem1526 = Partition() + _elem1526.read(iprot) + self.success.append(_elem1526) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1527 in self.success: + iter1527.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_result) 
+get_partitions_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [Partition, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_partitions_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = PartitionsRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_req_args) +get_partitions_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [PartitionsRequest, None], + None, + ), # 1 +) + + +class get_partitions_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = PartitionsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 
2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_req_result) +get_partitions_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [PartitionsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_partitions_with_auth_args: + """ + Attributes: + - db_name + - tbl_name + - max_parts + - user_name + - group_names + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + max_parts=-1, + user_name=None, + group_names=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.max_parts = max_parts + self.user_name = user_name + self.group_names = group_names + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I16: + self.max_parts = iprot.readI16() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.user_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.group_names = [] + (_etype1531, _size1528) = iprot.readListBegin() + for _i1532 in range(_size1528): + _elem1533 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.group_names.append(_elem1533) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_with_auth_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.max_parts is not None: + oprot.writeFieldBegin("max_parts", TType.I16, 3) + oprot.writeI16(self.max_parts) + oprot.writeFieldEnd() + if self.user_name is 
not None: + oprot.writeFieldBegin("user_name", TType.STRING, 4) + oprot.writeString(self.user_name.encode("utf-8") if sys.version_info[0] == 2 else self.user_name) + oprot.writeFieldEnd() + if self.group_names is not None: + oprot.writeFieldBegin("group_names", TType.LIST, 5) + oprot.writeListBegin(TType.STRING, len(self.group_names)) + for iter1534 in self.group_names: + oprot.writeString(iter1534.encode("utf-8") if sys.version_info[0] == 2 else iter1534) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_with_auth_args) +get_partitions_with_auth_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I16, + "max_parts", + None, + -1, + ), # 3 + ( + 4, + TType.STRING, + "user_name", + "UTF8", + None, + ), # 4 + ( + 5, + TType.LIST, + "group_names", + (TType.STRING, "UTF8", False), + None, + ), # 5 +) + + +class get_partitions_with_auth_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1538, _size1535) = iprot.readListBegin() + for _i1539 in range(_size1535): + _elem1540 = Partition() + _elem1540.read(iprot) + self.success.append(_elem1540) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_with_auth_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1541 in self.success: + iter1541.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def 
__eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_with_auth_result) +get_partitions_with_auth_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [Partition, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_partitions_pspec_args: + """ + Attributes: + - db_name + - tbl_name + - max_parts + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + max_parts=-1, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.max_parts = max_parts + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.max_parts = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_pspec_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.max_parts is not None: + oprot.writeFieldBegin("max_parts", TType.I32, 3) + oprot.writeI32(self.max_parts) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_pspec_args) +get_partitions_pspec_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I32, + "max_parts", + None, + -1, + ), # 3 +) + + +class get_partitions_pspec_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and 
isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1545, _size1542) = iprot.readListBegin() + for _i1546 in range(_size1542): + _elem1547 = PartitionSpec() + _elem1547.read(iprot) + self.success.append(_elem1547) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_pspec_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1548 in self.success: + iter1548.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_pspec_result) +get_partitions_pspec_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [PartitionSpec, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_partition_names_args: + """ + Attributes: + - db_name + - tbl_name + - max_parts + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + max_parts=-1, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.max_parts = max_parts + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I16: + self.max_parts = iprot.readI16() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + 
iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_names_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.max_parts is not None: + oprot.writeFieldBegin("max_parts", TType.I16, 3) + oprot.writeI16(self.max_parts) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_names_args) +get_partition_names_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I16, + "max_parts", + None, + -1, + ), # 3 +) + + +class get_partition_names_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1552, _size1549) = iprot.readListBegin() + for _i1553 in range(_size1549): + _elem1554 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1554) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_names_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1555 in self.success: + oprot.writeString(iter1555.encode("utf-8") if sys.version_info[0] == 2 else iter1555) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + 
self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_names_result) +get_partition_names_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_partition_values_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = PartitionValuesRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_values_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_values_args) +get_partition_values_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [PartitionValuesRequest, None], + None, + ), # 1 +) + + +class get_partition_values_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = PartitionValuesResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + 
iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_values_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_values_result) +get_partition_values_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [PartitionValuesResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partitions_ps_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + - max_parts + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_vals=None, + max_parts=-1, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.max_parts = max_parts + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype1559, _size1556) = iprot.readListBegin() + for _i1560 in range(_size1556): + _elem1561 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.part_vals.append(_elem1561) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I16: + self.max_parts = iprot.readI16() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_ps_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + 
oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter1562 in self.part_vals: + oprot.writeString(iter1562.encode("utf-8") if sys.version_info[0] == 2 else iter1562) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.max_parts is not None: + oprot.writeFieldBegin("max_parts", TType.I16, 4) + oprot.writeI16(self.max_parts) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_ps_args) +get_partitions_ps_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "part_vals", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.I16, + "max_parts", + None, + -1, + ), # 4 +) + + +class get_partitions_ps_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1566, _size1563) = iprot.readListBegin() + for _i1567 in range(_size1563): + _elem1568 = Partition() + _elem1568.read(iprot) + self.success.append(_elem1568) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_ps_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1569 in self.success: + iter1569.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", 
".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_ps_result) +get_partitions_ps_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [Partition, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partitions_ps_with_auth_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + - max_parts + - user_name + - group_names + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_vals=None, + max_parts=-1, + user_name=None, + group_names=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.max_parts = max_parts + self.user_name = user_name + self.group_names = group_names + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype1573, _size1570) = iprot.readListBegin() + for _i1574 in range(_size1570): + _elem1575 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.part_vals.append(_elem1575) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I16: + self.max_parts = iprot.readI16() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.user_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.group_names = [] + (_etype1579, _size1576) = iprot.readListBegin() + for _i1580 in range(_size1576): + _elem1581 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.group_names.append(_elem1581) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_ps_with_auth_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if 
self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter1582 in self.part_vals: + oprot.writeString(iter1582.encode("utf-8") if sys.version_info[0] == 2 else iter1582) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.max_parts is not None: + oprot.writeFieldBegin("max_parts", TType.I16, 4) + oprot.writeI16(self.max_parts) + oprot.writeFieldEnd() + if self.user_name is not None: + oprot.writeFieldBegin("user_name", TType.STRING, 5) + oprot.writeString(self.user_name.encode("utf-8") if sys.version_info[0] == 2 else self.user_name) + oprot.writeFieldEnd() + if self.group_names is not None: + oprot.writeFieldBegin("group_names", TType.LIST, 6) + oprot.writeListBegin(TType.STRING, len(self.group_names)) + for iter1583 in self.group_names: + oprot.writeString(iter1583.encode("utf-8") if sys.version_info[0] == 2 else iter1583) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_ps_with_auth_args) +get_partitions_ps_with_auth_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "part_vals", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.I16, + "max_parts", + None, + -1, + ), # 4 + ( + 5, + TType.STRING, + "user_name", + "UTF8", + None, + ), # 5 + ( + 6, + TType.LIST, + "group_names", + (TType.STRING, "UTF8", False), + None, + ), # 6 +) + + +class get_partitions_ps_with_auth_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1587, _size1584) = iprot.readListBegin() + for _i1588 in range(_size1584): + _elem1589 = Partition() + _elem1589.read(iprot) + self.success.append(_elem1589) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_ps_with_auth_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1590 in self.success: + 
iter1590.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_ps_with_auth_result) +get_partitions_ps_with_auth_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [Partition, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_partitions_ps_with_auth_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetPartitionsPsWithAuthRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_ps_with_auth_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_ps_with_auth_req_args) +get_partitions_ps_with_auth_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [GetPartitionsPsWithAuthRequest, None], + None, + ), # 1 +) + + +class get_partitions_ps_with_auth_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetPartitionsPsWithAuthResponse() + self.success.read(iprot) 
+ else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_ps_with_auth_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_ps_with_auth_req_result) +get_partitions_ps_with_auth_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetPartitionsPsWithAuthResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partition_names_ps_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + - max_parts + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_vals=None, + max_parts=-1, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.max_parts = max_parts + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype1594, _size1591) = iprot.readListBegin() + for _i1595 in range(_size1591): + _elem1596 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.part_vals.append(_elem1596) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I16: + self.max_parts = iprot.readI16() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, 
[self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_names_ps_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter1597 in self.part_vals: + oprot.writeString(iter1597.encode("utf-8") if sys.version_info[0] == 2 else iter1597) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.max_parts is not None: + oprot.writeFieldBegin("max_parts", TType.I16, 4) + oprot.writeI16(self.max_parts) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_names_ps_args) +get_partition_names_ps_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "part_vals", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.I16, + "max_parts", + None, + -1, + ), # 4 +) + + +class get_partition_names_ps_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1601, _size1598) = iprot.readListBegin() + for _i1602 in range(_size1598): + _elem1603 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1603) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_names_ps_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1604 in self.success: + oprot.writeString(iter1604.encode("utf-8") if sys.version_info[0] == 2 else iter1604) + oprot.writeListEnd() + 
oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_names_ps_result) +get_partition_names_ps_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partition_names_ps_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetPartitionNamesPsRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_names_ps_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_names_ps_req_args) +get_partition_names_ps_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [GetPartitionNamesPsRequest, None], + None, + ), # 1 +) + + +class get_partition_names_ps_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetPartitionNamesPsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = 
MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_names_ps_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_names_ps_req_result) +get_partition_names_ps_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetPartitionNamesPsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partition_names_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = PartitionsByExprRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_names_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_names_req_args) +get_partition_names_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [PartitionsByExprRequest, None], + None, + ), # 1 +) + + +class get_partition_names_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = 
o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1608, _size1605) = iprot.readListBegin() + for _i1609 in range(_size1605): + _elem1610 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1610) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_names_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1611 in self.success: + oprot.writeString(iter1611.encode("utf-8") if sys.version_info[0] == 2 else iter1611) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_names_req_result) +get_partition_names_req_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partitions_by_filter_args: + """ + Attributes: + - db_name + - tbl_name + - filter + - max_parts + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + filter=None, + max_parts=-1, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.filter = filter + self.max_parts = max_parts + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + 
self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.filter = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I16: + self.max_parts = iprot.readI16() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_by_filter_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.filter is not None: + oprot.writeFieldBegin("filter", TType.STRING, 3) + oprot.writeString(self.filter.encode("utf-8") if sys.version_info[0] == 2 else self.filter) + oprot.writeFieldEnd() + if self.max_parts is not None: + oprot.writeFieldBegin("max_parts", TType.I16, 4) + oprot.writeI16(self.max_parts) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_by_filter_args) +get_partitions_by_filter_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "filter", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I16, + "max_parts", + None, + -1, + ), # 4 +) + + +class get_partitions_by_filter_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1615, _size1612) = iprot.readListBegin() + for _i1616 in range(_size1612): + _elem1617 = Partition() + _elem1617.read(iprot) + self.success.append(_elem1617) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not 
None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_by_filter_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1618 in self.success: + iter1618.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_by_filter_result) +get_partitions_by_filter_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [Partition, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_part_specs_by_filter_args: + """ + Attributes: + - db_name + - tbl_name + - filter + - max_parts + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + filter=None, + max_parts=-1, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.filter = filter + self.max_parts = max_parts + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.filter = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.max_parts = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_part_specs_by_filter_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) 
+ oprot.writeFieldEnd() + if self.filter is not None: + oprot.writeFieldBegin("filter", TType.STRING, 3) + oprot.writeString(self.filter.encode("utf-8") if sys.version_info[0] == 2 else self.filter) + oprot.writeFieldEnd() + if self.max_parts is not None: + oprot.writeFieldBegin("max_parts", TType.I32, 4) + oprot.writeI32(self.max_parts) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_part_specs_by_filter_args) +get_part_specs_by_filter_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "filter", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I32, + "max_parts", + None, + -1, + ), # 4 +) + + +class get_part_specs_by_filter_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1622, _size1619) = iprot.readListBegin() + for _i1623 in range(_size1619): + _elem1624 = PartitionSpec() + _elem1624.read(iprot) + self.success.append(_elem1624) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_part_specs_by_filter_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1625 in self.success: + iter1625.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_part_specs_by_filter_result) 
+get_part_specs_by_filter_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [PartitionSpec, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partitions_by_expr_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = PartitionsByExprRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_by_expr_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_by_expr_args) +get_partitions_by_expr_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [PartitionsByExprRequest, None], + None, + ), # 1 +) + + +class get_partitions_by_expr_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = PartitionsByExprResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_by_expr_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is 
not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_by_expr_result) +get_partitions_by_expr_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [PartitionsByExprResult, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partitions_spec_by_expr_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = PartitionsByExprRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_spec_by_expr_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_spec_by_expr_args) +get_partitions_spec_by_expr_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [PartitionsByExprRequest, None], + None, + ), # 1 +) + + +class get_partitions_spec_by_expr_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = PartitionsSpecByExprResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + 
else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_spec_by_expr_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_spec_by_expr_result) +get_partitions_spec_by_expr_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [PartitionsSpecByExprResult, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_num_partitions_by_filter_args: + """ + Attributes: + - db_name + - tbl_name + - filter + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + filter=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.filter = filter + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.filter = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_num_partitions_by_filter_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.filter is not None: + oprot.writeFieldBegin("filter", TType.STRING, 3) + 
oprot.writeString(self.filter.encode("utf-8") if sys.version_info[0] == 2 else self.filter) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_num_partitions_by_filter_args) +get_num_partitions_by_filter_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "filter", + "UTF8", + None, + ), # 3 +) + + +class get_num_partitions_by_filter_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.I32: + self.success = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_num_partitions_by_filter_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.I32, 0) + oprot.writeI32(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_num_partitions_by_filter_result) +get_num_partitions_by_filter_result.thrift_spec = ( + ( + 0, + TType.I32, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partitions_by_names_args: + """ + Attributes: + - db_name + - tbl_name + - names + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + names=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.names = names + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and 
isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.names = [] + (_etype1629, _size1626) = iprot.readListBegin() + for _i1630 in range(_size1626): + _elem1631 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.names.append(_elem1631) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_by_names_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.names is not None: + oprot.writeFieldBegin("names", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.names)) + for iter1632 in self.names: + oprot.writeString(iter1632.encode("utf-8") if sys.version_info[0] == 2 else iter1632) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_by_names_args) +get_partitions_by_names_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "names", + (TType.STRING, "UTF8", False), + None, + ), # 3 +) + + +class get_partitions_by_names_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1636, _size1633) = iprot.readListBegin() + for _i1637 in 
range(_size1633): + _elem1638 = Partition() + _elem1638.read(iprot) + self.success.append(_elem1638) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_by_names_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1639 in self.success: + iter1639.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_by_names_result) +get_partitions_by_names_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [Partition, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_partitions_by_names_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetPartitionsByNamesRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_by_names_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_by_names_req_args) 
+get_partitions_by_names_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [GetPartitionsByNamesRequest, None], + None, + ), # 1 +) + + +class get_partitions_by_names_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetPartitionsByNamesResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_by_names_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_by_names_req_result) +get_partitions_by_names_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetPartitionsByNamesResult, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class alter_partition_args: + """ + Attributes: + - db_name + - tbl_name + - new_part + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + new_part=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.new_part = new_part + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if 
sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.new_part = Partition() + self.new_part.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_partition_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.new_part is not None: + oprot.writeFieldBegin("new_part", TType.STRUCT, 3) + self.new_part.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_partition_args) +alter_partition_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRUCT, + "new_part", + [Partition, None], + None, + ), # 3 +) + + +class alter_partition_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_partition_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + 
+all_structs.append(alter_partition_result) +alter_partition_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidOperationException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class alter_partitions_args: + """ + Attributes: + - db_name + - tbl_name + - new_parts + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + new_parts=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.new_parts = new_parts + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.new_parts = [] + (_etype1643, _size1640) = iprot.readListBegin() + for _i1644 in range(_size1640): + _elem1645 = Partition() + _elem1645.read(iprot) + self.new_parts.append(_elem1645) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_partitions_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.new_parts is not None: + oprot.writeFieldBegin("new_parts", TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) + for iter1646 in self.new_parts: + iter1646.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_partitions_args) +alter_partitions_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "new_parts", + (TType.STRUCT, [Partition, None], False), + None, + ), # 3 +) + + +class alter_partitions_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and 
isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_partitions_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_partitions_result) +alter_partitions_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidOperationException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class alter_partitions_with_environment_context_args: + """ + Attributes: + - db_name + - tbl_name + - new_parts + - environment_context + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + new_parts=None, + environment_context=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.new_parts = new_parts + self.environment_context = environment_context + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.new_parts = [] + (_etype1650, _size1647) = iprot.readListBegin() + for _i1651 in range(_size1647): + _elem1652 = Partition() + _elem1652.read(iprot) + self.new_parts.append(_elem1652) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.environment_context = EnvironmentContext() + self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + 
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_partitions_with_environment_context_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.new_parts is not None: + oprot.writeFieldBegin("new_parts", TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) + for iter1653 in self.new_parts: + iter1653.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.environment_context is not None: + oprot.writeFieldBegin("environment_context", TType.STRUCT, 4) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_partitions_with_environment_context_args) +alter_partitions_with_environment_context_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "new_parts", + (TType.STRUCT, [Partition, None], False), + None, + ), # 3 + ( + 4, + TType.STRUCT, + "environment_context", + [EnvironmentContext, None], + None, + ), # 4 +) + + +class alter_partitions_with_environment_context_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_partitions_with_environment_context_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return 
isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_partitions_with_environment_context_result) +alter_partitions_with_environment_context_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidOperationException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class alter_partitions_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = AlterPartitionsRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_partitions_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_partitions_req_args) +alter_partitions_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [AlterPartitionsRequest, None], + None, + ), # 1 +) + + +class alter_partitions_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = AlterPartitionsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_partitions_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if 
self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_partitions_req_result) +alter_partitions_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [AlterPartitionsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidOperationException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class alter_partition_with_environment_context_args: + """ + Attributes: + - db_name + - tbl_name + - new_part + - environment_context + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + new_part=None, + environment_context=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.new_part = new_part + self.environment_context = environment_context + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.new_part = Partition() + self.new_part.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.environment_context = EnvironmentContext() + self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_partition_with_environment_context_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.new_part is not None: + oprot.writeFieldBegin("new_part", TType.STRUCT, 3) + self.new_part.write(oprot) + oprot.writeFieldEnd() + if self.environment_context is not None: + oprot.writeFieldBegin("environment_context", TType.STRUCT, 4) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + 
oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_partition_with_environment_context_args) +alter_partition_with_environment_context_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRUCT, + "new_part", + [Partition, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "environment_context", + [EnvironmentContext, None], + None, + ), # 4 +) + + +class alter_partition_with_environment_context_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_partition_with_environment_context_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_partition_with_environment_context_result) +alter_partition_with_environment_context_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidOperationException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class rename_partition_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + - new_part + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_vals=None, + new_part=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.new_part = new_part + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, 
ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.part_vals = [] + (_etype1657, _size1654) = iprot.readListBegin() + for _i1658 in range(_size1654): + _elem1659 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.part_vals.append(_elem1659) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.new_part = Partition() + self.new_part.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("rename_partition_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter1660 in self.part_vals: + oprot.writeString(iter1660.encode("utf-8") if sys.version_info[0] == 2 else iter1660) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.new_part is not None: + oprot.writeFieldBegin("new_part", TType.STRUCT, 4) + self.new_part.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(rename_partition_args) +rename_partition_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "part_vals", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.STRUCT, + "new_part", + [Partition, None], + None, + ), # 4 +) + + +class rename_partition_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = 
InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("rename_partition_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(rename_partition_result) +rename_partition_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidOperationException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class rename_partition_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = RenamePartitionRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("rename_partition_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(rename_partition_req_args) +rename_partition_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [RenamePartitionRequest, None], + None, + ), # 1 +) + + +class rename_partition_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + 
return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = RenamePartitionResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("rename_partition_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(rename_partition_req_result) +rename_partition_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [RenamePartitionResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidOperationException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class partition_name_has_valid_characters_args: + """ + Attributes: + - part_vals + - throw_exception + + """ + + def __init__( + self, + part_vals=None, + throw_exception=None, + ): + self.part_vals = part_vals + self.throw_exception = throw_exception + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.part_vals = [] + (_etype1664, _size1661) = iprot.readListBegin() + for _i1665 in range(_size1661): + _elem1666 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.part_vals.append(_elem1666) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.throw_exception = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("partition_name_has_valid_characters_args") + if self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for 
iter1667 in self.part_vals: + oprot.writeString(iter1667.encode("utf-8") if sys.version_info[0] == 2 else iter1667) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.throw_exception is not None: + oprot.writeFieldBegin("throw_exception", TType.BOOL, 2) + oprot.writeBool(self.throw_exception) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(partition_name_has_valid_characters_args) +partition_name_has_valid_characters_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "part_vals", + (TType.STRING, "UTF8", False), + None, + ), # 1 + ( + 2, + TType.BOOL, + "throw_exception", + None, + None, + ), # 2 +) + + +class partition_name_has_valid_characters_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("partition_name_has_valid_characters_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(partition_name_has_valid_characters_result) +partition_name_has_valid_characters_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_config_value_args: + """ + Attributes: + - name + - defaultValue + + """ + + def __init__( + self, + name=None, + defaultValue=None, + ): + self.name = name + self.defaultValue = defaultValue + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + 
iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.defaultValue = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_config_value_args") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.defaultValue is not None: + oprot.writeFieldBegin("defaultValue", TType.STRING, 2) + oprot.writeString(self.defaultValue.encode("utf-8") if sys.version_info[0] == 2 else self.defaultValue) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_config_value_args) +get_config_value_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "defaultValue", + "UTF8", + None, + ), # 2 +) + + +class get_config_value_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = ConfigValSecurityException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_config_value_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRING, 0) + oprot.writeString(self.success.encode("utf-8") if sys.version_info[0] == 2 else self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, 
value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_config_value_result) +get_config_value_result.thrift_spec = ( + ( + 0, + TType.STRING, + "success", + "UTF8", + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [ConfigValSecurityException, None], + None, + ), # 1 +) + + +class partition_name_to_vals_args: + """ + Attributes: + - part_name + + """ + + def __init__( + self, + part_name=None, + ): + self.part_name = part_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.part_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("partition_name_to_vals_args") + if self.part_name is not None: + oprot.writeFieldBegin("part_name", TType.STRING, 1) + oprot.writeString(self.part_name.encode("utf-8") if sys.version_info[0] == 2 else self.part_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(partition_name_to_vals_args) +partition_name_to_vals_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "part_name", + "UTF8", + None, + ), # 1 +) + + +class partition_name_to_vals_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1671, _size1668) = iprot.readListBegin() + for _i1672 in range(_size1668): + _elem1673 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1673) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and 
self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("partition_name_to_vals_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1674 in self.success: + oprot.writeString(iter1674.encode("utf-8") if sys.version_info[0] == 2 else iter1674) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(partition_name_to_vals_result) +partition_name_to_vals_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class partition_name_to_spec_args: + """ + Attributes: + - part_name + + """ + + def __init__( + self, + part_name=None, + ): + self.part_name = part_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.part_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("partition_name_to_spec_args") + if self.part_name is not None: + oprot.writeFieldBegin("part_name", TType.STRING, 1) + oprot.writeString(self.part_name.encode("utf-8") if sys.version_info[0] == 2 else self.part_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(partition_name_to_spec_args) +partition_name_to_spec_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "part_name", + "UTF8", + None, + ), # 1 +) + + +class partition_name_to_spec_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, 
self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.MAP: + self.success = {} + (_ktype1676, _vtype1677, _size1675) = iprot.readMapBegin() + for _i1679 in range(_size1675): + _key1680 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val1681 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success[_key1680] = _val1681 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("partition_name_to_spec_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.MAP, 0) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) + for kiter1682, viter1683 in self.success.items(): + oprot.writeString(kiter1682.encode("utf-8") if sys.version_info[0] == 2 else kiter1682) + oprot.writeString(viter1683.encode("utf-8") if sys.version_info[0] == 2 else viter1683) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(partition_name_to_spec_result) +partition_name_to_spec_result.thrift_spec = ( + ( + 0, + TType.MAP, + "success", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class markPartitionForEvent_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + - eventType + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_vals=None, + eventType=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.eventType = eventType + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.MAP: + self.part_vals = {} + (_ktype1685, _vtype1686, _size1684) = 
iprot.readMapBegin() + for _i1688 in range(_size1684): + _key1689 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val1690 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.part_vals[_key1689] = _val1690 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.eventType = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("markPartitionForEvent_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.MAP, 3) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) + for kiter1691, viter1692 in self.part_vals.items(): + oprot.writeString(kiter1691.encode("utf-8") if sys.version_info[0] == 2 else kiter1691) + oprot.writeString(viter1692.encode("utf-8") if sys.version_info[0] == 2 else viter1692) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.eventType is not None: + oprot.writeFieldBegin("eventType", TType.I32, 4) + oprot.writeI32(self.eventType) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(markPartitionForEvent_args) +markPartitionForEvent_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.MAP, + "part_vals", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.I32, + "eventType", + None, + None, + ), # 4 +) + + +class markPartitionForEvent_result: + """ + Attributes: + - o1 + - o2 + - o3 + - o4 + - o5 + - o6 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + o4=None, + o5=None, + o6=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + self.o5 = o5 + self.o6 = o6 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if 
ftype == TType.STRUCT: + self.o3 = UnknownDBException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = UnknownTableException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.o5 = UnknownPartitionException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRUCT: + self.o6 = InvalidPartitionException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("markPartitionForEvent_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + if self.o5 is not None: + oprot.writeFieldBegin("o5", TType.STRUCT, 5) + self.o5.write(oprot) + oprot.writeFieldEnd() + if self.o6 is not None: + oprot.writeFieldBegin("o6", TType.STRUCT, 6) + self.o6.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(markPartitionForEvent_result) +markPartitionForEvent_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [UnknownDBException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [UnknownTableException, None], + None, + ), # 4 + ( + 5, + TType.STRUCT, + "o5", + [UnknownPartitionException, None], + None, + ), # 5 + ( + 6, + TType.STRUCT, + "o6", + [InvalidPartitionException, None], + None, + ), # 6 +) + + +class isPartitionMarkedForEvent_args: + """ + Attributes: + - db_name + - tbl_name + - part_vals + - eventType + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_vals=None, + eventType=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.eventType = eventType + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if 
sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.MAP: + self.part_vals = {} + (_ktype1694, _vtype1695, _size1693) = iprot.readMapBegin() + for _i1697 in range(_size1693): + _key1698 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val1699 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.part_vals[_key1698] = _val1699 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.eventType = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("isPartitionMarkedForEvent_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals is not None: + oprot.writeFieldBegin("part_vals", TType.MAP, 3) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) + for kiter1700, viter1701 in self.part_vals.items(): + oprot.writeString(kiter1700.encode("utf-8") if sys.version_info[0] == 2 else kiter1700) + oprot.writeString(viter1701.encode("utf-8") if sys.version_info[0] == 2 else viter1701) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.eventType is not None: + oprot.writeFieldBegin("eventType", TType.I32, 4) + oprot.writeI32(self.eventType) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(isPartitionMarkedForEvent_args) +isPartitionMarkedForEvent_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.MAP, + "part_vals", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.I32, + "eventType", + None, + None, + ), # 4 +) + + +class isPartitionMarkedForEvent_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + - o5 + - o6 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + o5=None, + o6=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + self.o5 = o5 + self.o6 = o6 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + 
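+                # TType.STOP is the sentinel that terminates a struct's field
+                # list in the Thrift wire format.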
break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = UnknownTableException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.o5 = UnknownPartitionException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRUCT: + self.o6 = InvalidPartitionException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("isPartitionMarkedForEvent_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + if self.o5 is not None: + oprot.writeFieldBegin("o5", TType.STRUCT, 5) + self.o5.write(oprot) + oprot.writeFieldEnd() + if self.o6 is not None: + oprot.writeFieldBegin("o6", TType.STRUCT, 6) + self.o6.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(isPartitionMarkedForEvent_result) +isPartitionMarkedForEvent_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [UnknownDBException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [UnknownTableException, None], + None, + ), # 4 + ( + 5, + TType.STRUCT, + "o5", + [UnknownPartitionException, None], + None, + ), # 5 + ( + 6, + TType.STRUCT, + "o6", + [InvalidPartitionException, None], + None, + ), # 6 +) + + +class get_primary_keys_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = 
iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = PrimaryKeysRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_primary_keys_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_primary_keys_args) +get_primary_keys_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [PrimaryKeysRequest, None], + None, + ), # 1 +) + + +class get_primary_keys_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = PrimaryKeysResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_primary_keys_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_primary_keys_result) +get_primary_keys_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [PrimaryKeysResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 
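+        # Each thrift_spec entry is (field id, wire type, field name,
+        # type arguments, default value); this one describes field 2 below.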
2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_foreign_keys_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = ForeignKeysRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_foreign_keys_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_foreign_keys_args) +get_foreign_keys_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [ForeignKeysRequest, None], + None, + ), # 1 +) + + +class get_foreign_keys_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ForeignKeysResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_foreign_keys_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, 
value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_foreign_keys_result) +get_foreign_keys_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [ForeignKeysResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_unique_constraints_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = UniqueConstraintsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_unique_constraints_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_unique_constraints_args) +get_unique_constraints_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [UniqueConstraintsRequest, None], + None, + ), # 1 +) + + +class get_unique_constraints_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = UniqueConstraintsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, 
self.thrift_spec])) + return + oprot.writeStructBegin("get_unique_constraints_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_unique_constraints_result) +get_unique_constraints_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [UniqueConstraintsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_not_null_constraints_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = NotNullConstraintsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_not_null_constraints_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_not_null_constraints_args) +get_not_null_constraints_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [NotNullConstraintsRequest, None], + None, + ), # 1 +) + + +class get_not_null_constraints_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == 
TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = NotNullConstraintsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_not_null_constraints_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_not_null_constraints_result) +get_not_null_constraints_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [NotNullConstraintsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_default_constraints_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = DefaultConstraintsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_default_constraints_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_default_constraints_args) +get_default_constraints_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + 
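+        # [struct class, None]: the None placeholder typically gets replaced
+        # with the class's own thrift_spec by the generated fix_spec(all_structs)
+        # epilogue once every struct in this module has been defined.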
[DefaultConstraintsRequest, None], + None, + ), # 1 +) + + +class get_default_constraints_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = DefaultConstraintsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_default_constraints_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_default_constraints_result) +get_default_constraints_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [DefaultConstraintsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_check_constraints_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = CheckConstraintsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_check_constraints_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + 
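+            # Nested structs serialize themselves; the field header written
+            # above carries the field name, wire type, and Thrift field id (1).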
self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_check_constraints_args) +get_check_constraints_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [CheckConstraintsRequest, None], + None, + ), # 1 +) + + +class get_check_constraints_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = CheckConstraintsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_check_constraints_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_check_constraints_result) +get_check_constraints_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [CheckConstraintsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_all_table_constraints_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = 
iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = AllTableConstraintsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_table_constraints_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_table_constraints_args) +get_all_table_constraints_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [AllTableConstraintsRequest, None], + None, + ), # 1 +) + + +class get_all_table_constraints_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = AllTableConstraintsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_table_constraints_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_table_constraints_result) +get_all_table_constraints_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [AllTableConstraintsResponse, None], + 
None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class update_table_column_statistics_args: + """ + Attributes: + - stats_obj + + """ + + def __init__( + self, + stats_obj=None, + ): + self.stats_obj = stats_obj + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.stats_obj = ColumnStatistics() + self.stats_obj.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_table_column_statistics_args") + if self.stats_obj is not None: + oprot.writeFieldBegin("stats_obj", TType.STRUCT, 1) + self.stats_obj.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_table_column_statistics_args) +update_table_column_statistics_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "stats_obj", + [ColumnStatistics, None], + None, + ), # 1 +) + + +class update_table_column_statistics_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_table_column_statistics_result") + if self.success is not None: + 
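+            # Only fields that are set get written. A Thrift call's return
+            # value travels back as field id 0 ("success") of the *_result
+            # struct, with the declared exceptions following at ids 1-4.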
oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_table_column_statistics_result) +update_table_column_statistics_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [InvalidInputException, None], + None, + ), # 4 +) + + +class update_partition_column_statistics_args: + """ + Attributes: + - stats_obj + + """ + + def __init__( + self, + stats_obj=None, + ): + self.stats_obj = stats_obj + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.stats_obj = ColumnStatistics() + self.stats_obj.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_partition_column_statistics_args") + if self.stats_obj is not None: + oprot.writeFieldBegin("stats_obj", TType.STRUCT, 1) + self.stats_obj.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_partition_column_statistics_args) +update_partition_column_statistics_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "stats_obj", + [ColumnStatistics, None], + None, + ), # 1 +) + + +class update_partition_column_statistics_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 
= o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_partition_column_statistics_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_partition_column_statistics_result) +update_partition_column_statistics_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [InvalidInputException, None], + None, + ), # 4 +) + + +class update_table_column_statistics_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = SetPartitionsStatsRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + 
iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_table_column_statistics_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_table_column_statistics_req_args) +update_table_column_statistics_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [SetPartitionsStatsRequest, None], + None, + ), # 1 +) + + +class update_table_column_statistics_req_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = SetPartitionsStatsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_table_column_statistics_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def 
__eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_table_column_statistics_req_result) +update_table_column_statistics_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [SetPartitionsStatsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [InvalidInputException, None], + None, + ), # 4 +) + + +class update_partition_column_statistics_req_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = SetPartitionsStatsRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_partition_column_statistics_req_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_partition_column_statistics_req_args) +update_partition_column_statistics_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [SetPartitionsStatsRequest, None], + None, + ), # 1 +) + + +class update_partition_column_statistics_req_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = SetPartitionsStatsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == 
TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_partition_column_statistics_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_partition_column_statistics_req_result) +update_partition_column_statistics_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [SetPartitionsStatsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [InvalidInputException, None], + None, + ), # 4 +) + + +class update_transaction_statistics_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = UpdateTransactionalStatsRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_transaction_statistics_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and 
self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_transaction_statistics_args) +update_transaction_statistics_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [UpdateTransactionalStatsRequest, None], + None, + ), # 1 +) + + +class update_transaction_statistics_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_transaction_statistics_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_transaction_statistics_result) +update_transaction_statistics_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_table_column_statistics_args: + """ + Attributes: + - db_name + - tbl_name + - col_name + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + col_name=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.col_name = col_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.col_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
oprot.writeStructBegin("get_table_column_statistics_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.col_name is not None: + oprot.writeFieldBegin("col_name", TType.STRING, 3) + oprot.writeString(self.col_name.encode("utf-8") if sys.version_info[0] == 2 else self.col_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_column_statistics_args) +get_table_column_statistics_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "col_name", + "UTF8", + None, + ), # 3 +) + + +class get_table_column_statistics_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ColumnStatistics() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidInputException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_column_statistics_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", 
TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_column_statistics_result) +get_table_column_statistics_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [ColumnStatistics, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [InvalidInputException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [InvalidObjectException, None], + None, + ), # 4 +) + + +class get_partition_column_statistics_args: + """ + Attributes: + - db_name + - tbl_name + - part_name + - col_name + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_name=None, + col_name=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_name = part_name + self.col_name = col_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.part_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.col_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_column_statistics_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_name is not None: + oprot.writeFieldBegin("part_name", TType.STRING, 3) + oprot.writeString(self.part_name.encode("utf-8") if sys.version_info[0] == 2 else self.part_name) + oprot.writeFieldEnd() + if self.col_name is not None: + oprot.writeFieldBegin("col_name", TType.STRING, 4) 
+ oprot.writeString(self.col_name.encode("utf-8") if sys.version_info[0] == 2 else self.col_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partition_column_statistics_args) +get_partition_column_statistics_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "part_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "col_name", + "UTF8", + None, + ), # 4 +) + + +class get_partition_column_statistics_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ColumnStatistics() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidInputException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partition_column_statistics_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == 
other) + + +all_structs.append(get_partition_column_statistics_result) +get_partition_column_statistics_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [ColumnStatistics, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [InvalidInputException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [InvalidObjectException, None], + None, + ), # 4 +) + + +class get_table_statistics_req_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = TableStatsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_statistics_req_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_statistics_req_args) +get_table_statistics_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [TableStatsRequest, None], + None, + ), # 1 +) + + +class get_table_statistics_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = TableStatsResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_table_statistics_req_result") + if 
self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_table_statistics_req_result) +get_table_statistics_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [TableStatsResult, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_partitions_statistics_req_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = PartitionsStatsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_statistics_req_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_statistics_req_args) +get_partitions_statistics_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [PartitionsStatsRequest, None], + None, + ), # 1 +) + + +class get_partitions_statistics_req_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = 
PartitionsStatsResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_statistics_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_statistics_req_result) +get_partitions_statistics_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [PartitionsStatsResult, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_aggr_stats_for_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = PartitionsStatsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_aggr_stats_for_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_aggr_stats_for_args) +get_aggr_stats_for_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [PartitionsStatsRequest, None], + None, + ), # 1 +) + + +class get_aggr_stats_for_result: + """ + 
Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = AggrStats() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_aggr_stats_for_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_aggr_stats_for_result) +get_aggr_stats_for_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [AggrStats, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class set_aggr_stats_for_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = SetPartitionsStatsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("set_aggr_stats_for_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L 
= ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(set_aggr_stats_for_args) +set_aggr_stats_for_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [SetPartitionsStatsRequest, None], + None, + ), # 1 +) + + +class set_aggr_stats_for_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("set_aggr_stats_for_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(set_aggr_stats_for_result) +set_aggr_stats_for_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [InvalidInputException, None], + None, + ), # 4 +) 
+ + +class delete_partition_column_statistics_args: + """ + Attributes: + - db_name + - tbl_name + - part_name + - col_name + - engine + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + part_name=None, + col_name=None, + engine=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.part_name = part_name + self.col_name = col_name + self.engine = engine + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.part_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.col_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.engine = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("delete_partition_column_statistics_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.part_name is not None: + oprot.writeFieldBegin("part_name", TType.STRING, 3) + oprot.writeString(self.part_name.encode("utf-8") if sys.version_info[0] == 2 else self.part_name) + oprot.writeFieldEnd() + if self.col_name is not None: + oprot.writeFieldBegin("col_name", TType.STRING, 4) + oprot.writeString(self.col_name.encode("utf-8") if sys.version_info[0] == 2 else self.col_name) + oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin("engine", TType.STRING, 5) + oprot.writeString(self.engine.encode("utf-8") if sys.version_info[0] == 2 else self.engine) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + 
+all_structs.append(delete_partition_column_statistics_args) +delete_partition_column_statistics_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "part_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "col_name", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "engine", + "UTF8", + None, + ), # 5 +) + + +class delete_partition_column_statistics_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("delete_partition_column_statistics_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(delete_partition_column_statistics_result) +delete_partition_column_statistics_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [InvalidObjectException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [InvalidInputException, 
None], + None, + ), # 4 +) + + +class delete_table_column_statistics_args: + """ + Attributes: + - db_name + - tbl_name + - col_name + - engine + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + col_name=None, + engine=None, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.col_name = col_name + self.engine = engine + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.col_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.engine = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("delete_table_column_statistics_args") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.col_name is not None: + oprot.writeFieldBegin("col_name", TType.STRING, 3) + oprot.writeString(self.col_name.encode("utf-8") if sys.version_info[0] == 2 else self.col_name) + oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin("engine", TType.STRING, 4) + oprot.writeString(self.engine.encode("utf-8") if sys.version_info[0] == 2 else self.engine) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(delete_table_column_statistics_args) +delete_table_column_statistics_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "col_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "engine", + "UTF8", + None, + ), # 4 +) + + +class delete_table_column_statistics_result: + """ + Attributes: + - 
success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("delete_table_column_statistics_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(delete_table_column_statistics_result) +delete_table_column_statistics_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [InvalidObjectException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [InvalidInputException, None], + None, + ), # 4 +) + + +class create_function_args: + """ + Attributes: + - func + + """ + + def __init__( + self, + func=None, + ): + self.func = func + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype 
== TType.STRUCT: + self.func = Function() + self.func.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_function_args") + if self.func is not None: + oprot.writeFieldBegin("func", TType.STRUCT, 1) + self.func.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_function_args) +create_function_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "func", + [Function, None], + None, + ), # 1 +) + + +class create_function_result: + """ + Attributes: + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_function_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_function_result) +create_function_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, 
None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [NoSuchObjectException, None], + None, + ), # 4 +) + + +class drop_function_args: + """ + Attributes: + - dbName + - funcName + + """ + + def __init__( + self, + dbName=None, + funcName=None, + ): + self.dbName = dbName + self.funcName = funcName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.funcName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_function_args") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.funcName is not None: + oprot.writeFieldBegin("funcName", TType.STRING, 2) + oprot.writeString(self.funcName.encode("utf-8") if sys.version_info[0] == 2 else self.funcName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_function_args) +drop_function_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "funcName", + "UTF8", + None, + ), # 2 +) + + +class drop_function_result: + """ + Attributes: + - o1 + - o3 + + """ + + def __init__( + self, + o1=None, + o3=None, + ): + self.o1 = o1 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + 
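+            # Fast path: serialize the whole struct in one call via the accelerated encoder (typically thrift's C fastbinary module).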
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_function_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 2) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_function_result) +drop_function_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 2 +) + + +class alter_function_args: + """ + Attributes: + - dbName + - funcName + - newFunc + + """ + + def __init__( + self, + dbName=None, + funcName=None, + newFunc=None, + ): + self.dbName = dbName + self.funcName = funcName + self.newFunc = newFunc + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.funcName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.newFunc = Function() + self.newFunc.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_function_args") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.funcName is not None: + oprot.writeFieldBegin("funcName", TType.STRING, 2) + oprot.writeString(self.funcName.encode("utf-8") if sys.version_info[0] == 2 else self.funcName) + oprot.writeFieldEnd() + if self.newFunc is not None: + oprot.writeFieldBegin("newFunc", TType.STRUCT, 3) + self.newFunc.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_function_args) +alter_function_args.thrift_spec = ( + 
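+    # thrift_spec entries are (field id, wire type, name, type args, default); slot 0 is None since field ids start at 1.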
None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "funcName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRUCT, + "newFunc", + [Function, None], + None, + ), # 3 +) + + +class alter_function_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_function_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_function_result) +alter_function_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [InvalidOperationException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_functions_args: + """ + Attributes: + - dbName + - pattern + + """ + + def __init__( + self, + dbName=None, + pattern=None, + ): + self.dbName = dbName + self.pattern = pattern + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.pattern = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_functions_args") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", 
TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.pattern is not None: + oprot.writeFieldBegin("pattern", TType.STRING, 2) + oprot.writeString(self.pattern.encode("utf-8") if sys.version_info[0] == 2 else self.pattern) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_functions_args) +get_functions_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "pattern", + "UTF8", + None, + ), # 2 +) + + +class get_functions_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1705, _size1702) = iprot.readListBegin() + for _i1706 in range(_size1702): + _elem1707 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1707) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_functions_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1708 in self.success: + oprot.writeString(iter1708.encode("utf-8") if sys.version_info[0] == 2 else iter1708) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_functions_result) +get_functions_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_function_args: + """ + Attributes: + - dbName + - funcName + + """ + + def __init__( + self, + dbName=None, + funcName=None, + ): + self.dbName = 
dbName + self.funcName = funcName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.funcName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_function_args") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.funcName is not None: + oprot.writeFieldBegin("funcName", TType.STRING, 2) + oprot.writeString(self.funcName.encode("utf-8") if sys.version_info[0] == 2 else self.funcName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_function_args) +get_function_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "funcName", + "UTF8", + None, + ), # 2 +) + + +class get_function_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Function() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_function_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 
is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_function_result) +get_function_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Function, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class get_all_functions_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_functions_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_functions_args) +get_all_functions_args.thrift_spec = () + + +class get_all_functions_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetAllFunctionsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_functions_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + 
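+        # A STOP marker ends the struct's field list on the wire.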
oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_functions_result) +get_all_functions_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetAllFunctionsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class create_role_args: + """ + Attributes: + - role + + """ + + def __init__( + self, + role=None, + ): + self.role = role + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.role = Role() + self.role.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_role_args") + if self.role is not None: + oprot.writeFieldBegin("role", TType.STRUCT, 1) + self.role.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_role_args) +create_role_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "role", + [Role, None], + None, + ), # 1 +) + + +class create_role_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_role_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", 
TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_role_result) +create_role_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class drop_role_args: + """ + Attributes: + - role_name + + """ + + def __init__( + self, + role_name=None, + ): + self.role_name = role_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.role_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_role_args") + if self.role_name is not None: + oprot.writeFieldBegin("role_name", TType.STRING, 1) + oprot.writeString(self.role_name.encode("utf-8") if sys.version_info[0] == 2 else self.role_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_role_args) +drop_role_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "role_name", + "UTF8", + None, + ), # 1 +) + + +class drop_role_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
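+        # Slow path: no accelerated encoder, so emit the struct field by field.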
oprot.writeStructBegin("drop_role_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_role_result) +drop_role_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_role_names_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_role_names_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_role_names_args) +get_role_names_args.thrift_spec = () + + +class get_role_names_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1712, _size1709) = iprot.readListBegin() + for _i1713 in range(_size1709): + _elem1714 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1714) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_role_names_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + 
oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1715 in self.success: + oprot.writeString(iter1715.encode("utf-8") if sys.version_info[0] == 2 else iter1715) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_role_names_result) +get_role_names_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class grant_role_args: + """ + Attributes: + - role_name + - principal_name + - principal_type + - grantor + - grantorType + - grant_option + + """ + + def __init__( + self, + role_name=None, + principal_name=None, + principal_type=None, + grantor=None, + grantorType=None, + grant_option=None, + ): + self.role_name = role_name + self.principal_name = principal_name + self.principal_type = principal_type + self.grantor = grantor + self.grantorType = grantorType + self.grant_option = grant_option + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.role_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.principal_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.principal_type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.grantor = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.grantorType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.BOOL: + self.grant_option = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("grant_role_args") + if self.role_name is not None: + oprot.writeFieldBegin("role_name", TType.STRING, 1) + oprot.writeString(self.role_name.encode("utf-8") if sys.version_info[0] == 2 else self.role_name) + oprot.writeFieldEnd() + if self.principal_name is not None: + oprot.writeFieldBegin("principal_name", TType.STRING, 2) + oprot.writeString(self.principal_name.encode("utf-8") if sys.version_info[0] == 2 else self.principal_name) + 
oprot.writeFieldEnd() + if self.principal_type is not None: + oprot.writeFieldBegin("principal_type", TType.I32, 3) + oprot.writeI32(self.principal_type) + oprot.writeFieldEnd() + if self.grantor is not None: + oprot.writeFieldBegin("grantor", TType.STRING, 4) + oprot.writeString(self.grantor.encode("utf-8") if sys.version_info[0] == 2 else self.grantor) + oprot.writeFieldEnd() + if self.grantorType is not None: + oprot.writeFieldBegin("grantorType", TType.I32, 5) + oprot.writeI32(self.grantorType) + oprot.writeFieldEnd() + if self.grant_option is not None: + oprot.writeFieldBegin("grant_option", TType.BOOL, 6) + oprot.writeBool(self.grant_option) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(grant_role_args) +grant_role_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "role_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "principal_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I32, + "principal_type", + None, + None, + ), # 3 + ( + 4, + TType.STRING, + "grantor", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I32, + "grantorType", + None, + None, + ), # 5 + ( + 6, + TType.BOOL, + "grant_option", + None, + None, + ), # 6 +) + + +class grant_role_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("grant_role_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(grant_role_result) +grant_role_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class 
revoke_role_args: + """ + Attributes: + - role_name + - principal_name + - principal_type + + """ + + def __init__( + self, + role_name=None, + principal_name=None, + principal_type=None, + ): + self.role_name = role_name + self.principal_name = principal_name + self.principal_type = principal_type + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.role_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.principal_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.principal_type = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("revoke_role_args") + if self.role_name is not None: + oprot.writeFieldBegin("role_name", TType.STRING, 1) + oprot.writeString(self.role_name.encode("utf-8") if sys.version_info[0] == 2 else self.role_name) + oprot.writeFieldEnd() + if self.principal_name is not None: + oprot.writeFieldBegin("principal_name", TType.STRING, 2) + oprot.writeString(self.principal_name.encode("utf-8") if sys.version_info[0] == 2 else self.principal_name) + oprot.writeFieldEnd() + if self.principal_type is not None: + oprot.writeFieldBegin("principal_type", TType.I32, 3) + oprot.writeI32(self.principal_type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(revoke_role_args) +revoke_role_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "role_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "principal_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I32, + "principal_type", + None, + None, + ), # 3 +) + + +class revoke_role_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + 
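+                    # A field with an unexpected wire type is skipped, tolerating schema drift between client and server.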
iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("revoke_role_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(revoke_role_result) +revoke_role_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class list_roles_args: + """ + Attributes: + - principal_name + - principal_type + + """ + + def __init__( + self, + principal_name=None, + principal_type=None, + ): + self.principal_name = principal_name + self.principal_type = principal_type + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.principal_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.principal_type = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("list_roles_args") + if self.principal_name is not None: + oprot.writeFieldBegin("principal_name", TType.STRING, 1) + oprot.writeString(self.principal_name.encode("utf-8") if sys.version_info[0] == 2 else self.principal_name) + oprot.writeFieldEnd() + if self.principal_type is not None: + oprot.writeFieldBegin("principal_type", TType.I32, 2) + oprot.writeI32(self.principal_type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(list_roles_args) +list_roles_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "principal_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.I32, + "principal_type", + None, + None, + ), # 2 +) + + +class list_roles_result: + """ + Attributes: + - success + 
- o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1719, _size1716) = iprot.readListBegin() + for _i1720 in range(_size1716): + _elem1721 = Role() + _elem1721.read(iprot) + self.success.append(_elem1721) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("list_roles_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1722 in self.success: + iter1722.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(list_roles_result) +list_roles_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [Role, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class grant_revoke_role_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GrantRevokeRoleRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("grant_revoke_role_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + 
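+    # Equality is structural: two instances are equal when their attribute dicts match.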
def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(grant_revoke_role_args) +grant_revoke_role_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [GrantRevokeRoleRequest, None], + None, + ), # 1 +) + + +class grant_revoke_role_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GrantRevokeRoleResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("grant_revoke_role_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(grant_revoke_role_result) +grant_revoke_role_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GrantRevokeRoleResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_principals_in_role_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GetPrincipalsInRoleRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_principals_in_role_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + 
oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_principals_in_role_args) +get_principals_in_role_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [GetPrincipalsInRoleRequest, None], + None, + ), # 1 +) + + +class get_principals_in_role_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetPrincipalsInRoleResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_principals_in_role_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_principals_in_role_result) +get_principals_in_role_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetPrincipalsInRoleResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_role_grants_for_principal_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GetRoleGrantsForPrincipalRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + 
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_role_grants_for_principal_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_role_grants_for_principal_args) +get_role_grants_for_principal_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [GetRoleGrantsForPrincipalRequest, None], + None, + ), # 1 +) + + +class get_role_grants_for_principal_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetRoleGrantsForPrincipalResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_role_grants_for_principal_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_role_grants_for_principal_result) +get_role_grants_for_principal_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetRoleGrantsForPrincipalResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_privilege_set_args: + """ + Attributes: + - hiveObject + - user_name + - group_names + + """ + + def __init__( + self, + hiveObject=None, + user_name=None, + group_names=None, + ): + self.hiveObject = hiveObject + self.user_name = user_name + self.group_names = group_names + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, 
self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.hiveObject = HiveObjectRef() + self.hiveObject.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.user_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.group_names = [] + (_etype1726, _size1723) = iprot.readListBegin() + for _i1727 in range(_size1723): + _elem1728 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.group_names.append(_elem1728) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_privilege_set_args") + if self.hiveObject is not None: + oprot.writeFieldBegin("hiveObject", TType.STRUCT, 1) + self.hiveObject.write(oprot) + oprot.writeFieldEnd() + if self.user_name is not None: + oprot.writeFieldBegin("user_name", TType.STRING, 2) + oprot.writeString(self.user_name.encode("utf-8") if sys.version_info[0] == 2 else self.user_name) + oprot.writeFieldEnd() + if self.group_names is not None: + oprot.writeFieldBegin("group_names", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.group_names)) + for iter1729 in self.group_names: + oprot.writeString(iter1729.encode("utf-8") if sys.version_info[0] == 2 else iter1729) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_privilege_set_args) +get_privilege_set_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "hiveObject", + [HiveObjectRef, None], + None, + ), # 1 + ( + 2, + TType.STRING, + "user_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "group_names", + (TType.STRING, "UTF8", False), + None, + ), # 3 +) + + +class get_privilege_set_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = PrincipalPrivilegeSet() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and 
self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_privilege_set_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_privilege_set_result) +get_privilege_set_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [PrincipalPrivilegeSet, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class list_privileges_args: + """ + Attributes: + - principal_name + - principal_type + - hiveObject + + """ + + def __init__( + self, + principal_name=None, + principal_type=None, + hiveObject=None, + ): + self.principal_name = principal_name + self.principal_type = principal_type + self.hiveObject = hiveObject + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.principal_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.principal_type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.hiveObject = HiveObjectRef() + self.hiveObject.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("list_privileges_args") + if self.principal_name is not None: + oprot.writeFieldBegin("principal_name", TType.STRING, 1) + oprot.writeString(self.principal_name.encode("utf-8") if sys.version_info[0] == 2 else self.principal_name) + oprot.writeFieldEnd() + if self.principal_type is not None: + oprot.writeFieldBegin("principal_type", TType.I32, 2) + oprot.writeI32(self.principal_type) + oprot.writeFieldEnd() + if self.hiveObject is not None: + oprot.writeFieldBegin("hiveObject", TType.STRUCT, 3) + self.hiveObject.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(list_privileges_args) 
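+# Layout of each thrift_spec entry, as emitted by the Thrift Python generator:
+#   (field id, TType constant, field name, type args, default value)
+# where "type args" is a nested spec for STRUCT/LIST fields and "UTF8" for strings.
+# Slot 0 is None in *_args structs because Thrift field ids start at 1; in *_result
+# structs slot 0 describes the method's return value ("success"). This spec is what
+# _fast_encode/_fast_decode hand to the accelerated (C) codec when it is available.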
+list_privileges_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "principal_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.I32, + "principal_type", + None, + None, + ), # 2 + ( + 3, + TType.STRUCT, + "hiveObject", + [HiveObjectRef, None], + None, + ), # 3 +) + + +class list_privileges_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1733, _size1730) = iprot.readListBegin() + for _i1734 in range(_size1730): + _elem1735 = HiveObjectPrivilege() + _elem1735.read(iprot) + self.success.append(_elem1735) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("list_privileges_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1736 in self.success: + iter1736.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(list_privileges_result) +list_privileges_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [HiveObjectPrivilege, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class grant_privileges_args: + """ + Attributes: + - privileges + + """ + + def __init__( + self, + privileges=None, + ): + self.privileges = privileges + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.privileges = PrivilegeBag() + self.privileges.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
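+        # Pure-Python fallback: the accelerated codec is unavailable (or there is
+        # no thrift_spec), so each set field is serialized individually below.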
oprot.writeStructBegin("grant_privileges_args") + if self.privileges is not None: + oprot.writeFieldBegin("privileges", TType.STRUCT, 1) + self.privileges.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(grant_privileges_args) +grant_privileges_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "privileges", + [PrivilegeBag, None], + None, + ), # 1 +) + + +class grant_privileges_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("grant_privileges_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(grant_privileges_result) +grant_privileges_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class revoke_privileges_args: + """ + Attributes: + - privileges + + """ + + def __init__( + self, + privileges=None, + ): + self.privileges = privileges + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.privileges = PrivilegeBag() + self.privileges.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if 
oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("revoke_privileges_args") + if self.privileges is not None: + oprot.writeFieldBegin("privileges", TType.STRUCT, 1) + self.privileges.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(revoke_privileges_args) +revoke_privileges_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "privileges", + [PrivilegeBag, None], + None, + ), # 1 +) + + +class revoke_privileges_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("revoke_privileges_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(revoke_privileges_result) +revoke_privileges_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class grant_revoke_privileges_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GrantRevokePrivilegeRequest() + 
self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("grant_revoke_privileges_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(grant_revoke_privileges_args) +grant_revoke_privileges_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [GrantRevokePrivilegeRequest, None], + None, + ), # 1 +) + + +class grant_revoke_privileges_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GrantRevokePrivilegeResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("grant_revoke_privileges_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(grant_revoke_privileges_result) +grant_revoke_privileges_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GrantRevokePrivilegeResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class refresh_privileges_args: + """ + Attributes: + - objToRefresh + - authorizer + - grantRequest + + """ + + def __init__( + self, + objToRefresh=None, + authorizer=None, + grantRequest=None, + ): + self.objToRefresh = objToRefresh + self.authorizer = authorizer + self.grantRequest = grantRequest + + def read(self, iprot): + if ( 
+ iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.objToRefresh = HiveObjectRef() + self.objToRefresh.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.authorizer = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.grantRequest = GrantRevokePrivilegeRequest() + self.grantRequest.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("refresh_privileges_args") + if self.objToRefresh is not None: + oprot.writeFieldBegin("objToRefresh", TType.STRUCT, 1) + self.objToRefresh.write(oprot) + oprot.writeFieldEnd() + if self.authorizer is not None: + oprot.writeFieldBegin("authorizer", TType.STRING, 2) + oprot.writeString(self.authorizer.encode("utf-8") if sys.version_info[0] == 2 else self.authorizer) + oprot.writeFieldEnd() + if self.grantRequest is not None: + oprot.writeFieldBegin("grantRequest", TType.STRUCT, 3) + self.grantRequest.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(refresh_privileges_args) +refresh_privileges_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "objToRefresh", + [HiveObjectRef, None], + None, + ), # 1 + ( + 2, + TType.STRING, + "authorizer", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRUCT, + "grantRequest", + [GrantRevokePrivilegeRequest, None], + None, + ), # 3 +) + + +class refresh_privileges_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GrantRevokePrivilegeResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
oprot.writeStructBegin("refresh_privileges_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(refresh_privileges_result) +refresh_privileges_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GrantRevokePrivilegeResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class set_ugi_args: + """ + Attributes: + - user_name + - group_names + + """ + + def __init__( + self, + user_name=None, + group_names=None, + ): + self.user_name = user_name + self.group_names = group_names + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.user_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.group_names = [] + (_etype1740, _size1737) = iprot.readListBegin() + for _i1741 in range(_size1737): + _elem1742 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.group_names.append(_elem1742) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("set_ugi_args") + if self.user_name is not None: + oprot.writeFieldBegin("user_name", TType.STRING, 1) + oprot.writeString(self.user_name.encode("utf-8") if sys.version_info[0] == 2 else self.user_name) + oprot.writeFieldEnd() + if self.group_names is not None: + oprot.writeFieldBegin("group_names", TType.LIST, 2) + oprot.writeListBegin(TType.STRING, len(self.group_names)) + for iter1743 in self.group_names: + oprot.writeString(iter1743.encode("utf-8") if sys.version_info[0] == 2 else iter1743) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(set_ugi_args) +set_ugi_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "user_name", + "UTF8", + None, + ), # 1 + ( + 2, + 
TType.LIST, + "group_names", + (TType.STRING, "UTF8", False), + None, + ), # 2 +) + + +class set_ugi_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1747, _size1744) = iprot.readListBegin() + for _i1748 in range(_size1744): + _elem1749 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1749) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("set_ugi_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1750 in self.success: + oprot.writeString(iter1750.encode("utf-8") if sys.version_info[0] == 2 else iter1750) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(set_ugi_result) +set_ugi_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_delegation_token_args: + """ + Attributes: + - token_owner + - renewer_kerberos_principal_name + + """ + + def __init__( + self, + token_owner=None, + renewer_kerberos_principal_name=None, + ): + self.token_owner = token_owner + self.renewer_kerberos_principal_name = renewer_kerberos_principal_name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.token_owner = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.renewer_kerberos_principal_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else 
iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_delegation_token_args") + if self.token_owner is not None: + oprot.writeFieldBegin("token_owner", TType.STRING, 1) + oprot.writeString(self.token_owner.encode("utf-8") if sys.version_info[0] == 2 else self.token_owner) + oprot.writeFieldEnd() + if self.renewer_kerberos_principal_name is not None: + oprot.writeFieldBegin("renewer_kerberos_principal_name", TType.STRING, 2) + oprot.writeString( + self.renewer_kerberos_principal_name.encode("utf-8") + if sys.version_info[0] == 2 + else self.renewer_kerberos_principal_name + ) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_delegation_token_args) +get_delegation_token_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "token_owner", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "renewer_kerberos_principal_name", + "UTF8", + None, + ), # 2 +) + + +class get_delegation_token_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_delegation_token_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRING, 0) + oprot.writeString(self.success.encode("utf-8") if sys.version_info[0] == 2 else self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_delegation_token_result) 
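+# Generated convention: every service method gets an <name>_args struct for its
+# parameters and an <name>_result struct whose field 0 ("success") carries the
+# return value, with declared exceptions in the remaining fields. The
+# sys.version_info[0] == 2 branches above keep byte/unicode string handling
+# compatible with Python 2 readers and writers.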
+get_delegation_token_result.thrift_spec = ( + ( + 0, + TType.STRING, + "success", + "UTF8", + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class renew_delegation_token_args: + """ + Attributes: + - token_str_form + + """ + + def __init__( + self, + token_str_form=None, + ): + self.token_str_form = token_str_form + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.token_str_form = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("renew_delegation_token_args") + if self.token_str_form is not None: + oprot.writeFieldBegin("token_str_form", TType.STRING, 1) + oprot.writeString(self.token_str_form.encode("utf-8") if sys.version_info[0] == 2 else self.token_str_form) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(renew_delegation_token_args) +renew_delegation_token_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "token_str_form", + "UTF8", + None, + ), # 1 +) + + +class renew_delegation_token_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.I64: + self.success = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("renew_delegation_token_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.I64, 0) + oprot.writeI64(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = 
["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(renew_delegation_token_result) +renew_delegation_token_result.thrift_spec = ( + ( + 0, + TType.I64, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class cancel_delegation_token_args: + """ + Attributes: + - token_str_form + + """ + + def __init__( + self, + token_str_form=None, + ): + self.token_str_form = token_str_form + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.token_str_form = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("cancel_delegation_token_args") + if self.token_str_form is not None: + oprot.writeFieldBegin("token_str_form", TType.STRING, 1) + oprot.writeString(self.token_str_form.encode("utf-8") if sys.version_info[0] == 2 else self.token_str_form) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(cancel_delegation_token_args) +cancel_delegation_token_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "token_str_form", + "UTF8", + None, + ), # 1 +) + + +class cancel_delegation_token_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("cancel_delegation_token_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + 
oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(cancel_delegation_token_result) +cancel_delegation_token_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class add_token_args: + """ + Attributes: + - token_identifier + - delegation_token + + """ + + def __init__( + self, + token_identifier=None, + delegation_token=None, + ): + self.token_identifier = token_identifier + self.delegation_token = delegation_token + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.token_identifier = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.delegation_token = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_token_args") + if self.token_identifier is not None: + oprot.writeFieldBegin("token_identifier", TType.STRING, 1) + oprot.writeString(self.token_identifier.encode("utf-8") if sys.version_info[0] == 2 else self.token_identifier) + oprot.writeFieldEnd() + if self.delegation_token is not None: + oprot.writeFieldBegin("delegation_token", TType.STRING, 2) + oprot.writeString(self.delegation_token.encode("utf-8") if sys.version_info[0] == 2 else self.delegation_token) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_token_args) +add_token_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "token_identifier", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "delegation_token", + "UTF8", + None, + ), # 2 +) + + +class add_token_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype 
== TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_token_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_token_result) +add_token_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 +) + + +class remove_token_args: + """ + Attributes: + - token_identifier + + """ + + def __init__( + self, + token_identifier=None, + ): + self.token_identifier = token_identifier + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.token_identifier = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("remove_token_args") + if self.token_identifier is not None: + oprot.writeFieldBegin("token_identifier", TType.STRING, 1) + oprot.writeString(self.token_identifier.encode("utf-8") if sys.version_info[0] == 2 else self.token_identifier) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(remove_token_args) +remove_token_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "token_identifier", + "UTF8", + None, + ), # 1 +) + + +class remove_token_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == 
TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("remove_token_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(remove_token_result) +remove_token_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 +) + + +class get_token_args: + """ + Attributes: + - token_identifier + + """ + + def __init__( + self, + token_identifier=None, + ): + self.token_identifier = token_identifier + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.token_identifier = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_token_args") + if self.token_identifier is not None: + oprot.writeFieldBegin("token_identifier", TType.STRING, 1) + oprot.writeString(self.token_identifier.encode("utf-8") if sys.version_info[0] == 2 else self.token_identifier) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_token_args) +get_token_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "token_identifier", + "UTF8", + None, + ), # 1 +) + + +class get_token_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = ( + 
iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_token_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRING, 0) + oprot.writeString(self.success.encode("utf-8") if sys.version_info[0] == 2 else self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_token_result) +get_token_result.thrift_spec = ( + ( + 0, + TType.STRING, + "success", + "UTF8", + None, + ), # 0 +) + + +class get_all_token_identifiers_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_token_identifiers_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_token_identifiers_args) +get_all_token_identifiers_args.thrift_spec = () + + +class get_all_token_identifiers_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1754, _size1751) = iprot.readListBegin() + for _i1755 in range(_size1751): + _elem1756 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1756) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + 
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_token_identifiers_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1757 in self.success: + oprot.writeString(iter1757.encode("utf-8") if sys.version_info[0] == 2 else iter1757) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_token_identifiers_result) +get_all_token_identifiers_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 +) + + +class add_master_key_args: + """ + Attributes: + - key + + """ + + def __init__( + self, + key=None, + ): + self.key = key + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.key = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_master_key_args") + if self.key is not None: + oprot.writeFieldBegin("key", TType.STRING, 1) + oprot.writeString(self.key.encode("utf-8") if sys.version_info[0] == 2 else self.key) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_master_key_args) +add_master_key_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "key", + "UTF8", + None, + ), # 1 +) + + +class add_master_key_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.I32: + self.success = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = 
MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_master_key_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.I32, 0) + oprot.writeI32(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_master_key_result) +add_master_key_result.thrift_spec = ( + ( + 0, + TType.I32, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class update_master_key_args: + """ + Attributes: + - seq_number + - key + + """ + + def __init__( + self, + seq_number=None, + key=None, + ): + self.seq_number = seq_number + self.key = key + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.seq_number = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.key = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_master_key_args") + if self.seq_number is not None: + oprot.writeFieldBegin("seq_number", TType.I32, 1) + oprot.writeI32(self.seq_number) + oprot.writeFieldEnd() + if self.key is not None: + oprot.writeFieldBegin("key", TType.STRING, 2) + oprot.writeString(self.key.encode("utf-8") if sys.version_info[0] == 2 else self.key) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_master_key_args) +update_master_key_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "seq_number", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "key", + "UTF8", + None, + ), # 2 +) + + +class update_master_key_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + 
self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_master_key_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_master_key_result) +update_master_key_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class remove_master_key_args: + """ + Attributes: + - key_seq + + """ + + def __init__( + self, + key_seq=None, + ): + self.key_seq = key_seq + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.key_seq = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("remove_master_key_args") + if self.key_seq is not None: + oprot.writeFieldBegin("key_seq", TType.I32, 1) + oprot.writeI32(self.key_seq) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(remove_master_key_args) +remove_master_key_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "key_seq", + None, + None, + ), # 1 +) + + +class remove_master_key_result: + """ + 
Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("remove_master_key_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(remove_master_key_result) +remove_master_key_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 +) + + +class get_master_keys_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_master_keys_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_master_keys_args) +get_master_keys_args.thrift_spec = () + + +class get_master_keys_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1761, _size1758) = iprot.readListBegin() + for _i1762 in range(_size1758): + _elem1763 = ( + iprot.readString().decode("utf-8", errors="replace") + if 
sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1763) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_master_keys_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1764 in self.success: + oprot.writeString(iter1764.encode("utf-8") if sys.version_info[0] == 2 else iter1764) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_master_keys_result) +get_master_keys_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 +) + + +class get_open_txns_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_open_txns_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_open_txns_args) +get_open_txns_args.thrift_spec = () + + +class get_open_txns_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetOpenTxnsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_open_txns_result") + if self.success is not None: + 
oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_open_txns_result) +get_open_txns_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetOpenTxnsResponse, None], + None, + ), # 0 +) + + +class get_open_txns_info_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_open_txns_info_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_open_txns_info_args) +get_open_txns_info_args.thrift_spec = () + + +class get_open_txns_info_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetOpenTxnsInfoResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_open_txns_info_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_open_txns_info_result) +get_open_txns_info_result.thrift_spec = ( + ( + 0, + 
TType.STRUCT, + "success", + [GetOpenTxnsInfoResponse, None], + None, + ), # 0 +) + + +class open_txns_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = OpenTxnRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("open_txns_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(open_txns_args) +open_txns_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [OpenTxnRequest, None], + None, + ), # 1 +) + + +class open_txns_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = OpenTxnsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("open_txns_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(open_txns_result) +open_txns_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [OpenTxnsResponse, None], + None, + ), # 0 +) + + +class abort_txn_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + 
iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = AbortTxnRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("abort_txn_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(abort_txn_args) +abort_txn_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [AbortTxnRequest, None], + None, + ), # 1 +) + + +class abort_txn_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("abort_txn_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(abort_txn_result) +abort_txn_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchTxnException, None], + None, + ), # 1 +) + + +class abort_txns_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + 
if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = AbortTxnsRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("abort_txns_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(abort_txns_args) +abort_txns_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [AbortTxnsRequest, None], + None, + ), # 1 +) + + +class abort_txns_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("abort_txns_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(abort_txns_result) +abort_txns_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchTxnException, None], + None, + ), # 1 +) + + +class commit_txn_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = CommitTxnRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is 
not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("commit_txn_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(commit_txn_args) +commit_txn_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [CommitTxnRequest, None], + None, + ), # 1 +) + + +class commit_txn_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("commit_txn_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(commit_txn_result) +commit_txn_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchTxnException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [TxnAbortedException, None], + None, + ), # 2 +) + + +class get_latest_txnid_in_conflict_args: + """ + Attributes: + - txnId + + """ + + def __init__( + self, + txnId=None, + ): + self.txnId = txnId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + 
def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_latest_txnid_in_conflict_args") + if self.txnId is not None: + oprot.writeFieldBegin("txnId", TType.I64, 1) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_latest_txnid_in_conflict_args) +get_latest_txnid_in_conflict_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "txnId", + None, + None, + ), # 1 +) + + +class get_latest_txnid_in_conflict_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.I64: + self.success = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_latest_txnid_in_conflict_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.I64, 0) + oprot.writeI64(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_latest_txnid_in_conflict_result) +get_latest_txnid_in_conflict_result.thrift_spec = ( + ( + 0, + TType.I64, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class repl_tbl_writeid_state_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + 
self.rqst = ReplTblWriteIdStateRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("repl_tbl_writeid_state_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(repl_tbl_writeid_state_args) +repl_tbl_writeid_state_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [ReplTblWriteIdStateRequest, None], + None, + ), # 1 +) + + +class repl_tbl_writeid_state_result: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("repl_tbl_writeid_state_result") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(repl_tbl_writeid_state_result) +repl_tbl_writeid_state_result.thrift_spec = () + + +class get_valid_write_ids_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = GetValidWriteIdsRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_valid_write_ids_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def 
validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_valid_write_ids_args) +get_valid_write_ids_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [GetValidWriteIdsRequest, None], + None, + ), # 1 +) + + +class get_valid_write_ids_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetValidWriteIdsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_valid_write_ids_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_valid_write_ids_result) +get_valid_write_ids_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetValidWriteIdsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchTxnException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class allocate_table_write_ids_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = AllocateTableWriteIdsRequest() 
+ self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("allocate_table_write_ids_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(allocate_table_write_ids_args) +allocate_table_write_ids_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [AllocateTableWriteIdsRequest, None], + None, + ), # 1 +) + + +class allocate_table_write_ids_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = AllocateTableWriteIdsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("allocate_table_write_ids_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(allocate_table_write_ids_result) 
+allocate_table_write_ids_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [AllocateTableWriteIdsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchTxnException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [TxnAbortedException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class get_max_allocated_table_write_id_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = MaxAllocatedTableWriteIdRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_max_allocated_table_write_id_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_max_allocated_table_write_id_args) +get_max_allocated_table_write_id_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [MaxAllocatedTableWriteIdRequest, None], + None, + ), # 1 +) + + +class get_max_allocated_table_write_id_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = MaxAllocatedTableWriteIdResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_max_allocated_table_write_id_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + 
oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_max_allocated_table_write_id_result) +get_max_allocated_table_write_id_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [MaxAllocatedTableWriteIdResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class seed_write_id_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = SeedTableWriteIdsRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("seed_write_id_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(seed_write_id_args) +seed_write_id_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [SeedTableWriteIdsRequest, None], + None, + ), # 1 +) + + +class seed_write_id_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("seed_write_id_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = 
["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(seed_write_id_result) +seed_write_id_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class seed_txn_id_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = SeedTxnIdRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("seed_txn_id_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(seed_txn_id_args) +seed_txn_id_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [SeedTxnIdRequest, None], + None, + ), # 1 +) + + +class seed_txn_id_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("seed_txn_id_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self 
== other) + + +all_structs.append(seed_txn_id_result) +seed_txn_id_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class lock_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = LockRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("lock_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(lock_args) +lock_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [LockRequest, None], + None, + ), # 1 +) + + +class lock_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = LockResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("lock_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, 
value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(lock_result) +lock_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [LockResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchTxnException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [TxnAbortedException, None], + None, + ), # 2 +) + + +class check_lock_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = CheckLockRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("check_lock_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(check_lock_args) +check_lock_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [CheckLockRequest, None], + None, + ), # 1 +) + + +class check_lock_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = LockResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = NoSuchLockException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, 
self.thrift_spec])) + return + oprot.writeStructBegin("check_lock_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(check_lock_result) +check_lock_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [LockResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchTxnException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [TxnAbortedException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [NoSuchLockException, None], + None, + ), # 3 +) + + +class unlock_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = UnlockRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("unlock_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(unlock_args) +unlock_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [UnlockRequest, None], + None, + ), # 1 +) + + +class unlock_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == 
TType.STRUCT: + self.o1 = NoSuchLockException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnOpenException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("unlock_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(unlock_result) +unlock_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchLockException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [TxnOpenException, None], + None, + ), # 2 +) + + +class show_locks_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = ShowLocksRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("show_locks_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(show_locks_args) +show_locks_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [ShowLocksRequest, None], + None, + ), # 1 +) + + +class show_locks_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break 
+ if fid == 0: + if ftype == TType.STRUCT: + self.success = ShowLocksResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("show_locks_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(show_locks_result) +show_locks_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [ShowLocksResponse, None], + None, + ), # 0 +) + + +class heartbeat_args: + """ + Attributes: + - ids + + """ + + def __init__( + self, + ids=None, + ): + self.ids = ids + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.ids = HeartbeatRequest() + self.ids.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("heartbeat_args") + if self.ids is not None: + oprot.writeFieldBegin("ids", TType.STRUCT, 1) + self.ids.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(heartbeat_args) +heartbeat_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "ids", + [HeartbeatRequest, None], + None, + ), # 1 +) + + +class heartbeat_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchLockException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchTxnException.read(iprot) + else: + 
iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = TxnAbortedException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("heartbeat_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(heartbeat_result) +heartbeat_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchLockException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchTxnException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [TxnAbortedException, None], + None, + ), # 3 +) + + +class heartbeat_txn_range_args: + """ + Attributes: + - txns + + """ + + def __init__( + self, + txns=None, + ): + self.txns = txns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.txns = HeartbeatTxnRangeRequest() + self.txns.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("heartbeat_txn_range_args") + if self.txns is not None: + oprot.writeFieldBegin("txns", TType.STRUCT, 1) + self.txns.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(heartbeat_txn_range_args) +heartbeat_txn_range_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "txns", + [HeartbeatTxnRangeRequest, None], + None, + ), # 1 +) + + +class heartbeat_txn_range_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not 
None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = HeartbeatTxnRangeResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("heartbeat_txn_range_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(heartbeat_txn_range_result) +heartbeat_txn_range_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [HeartbeatTxnRangeResponse, None], + None, + ), # 0 +) + + +class compact_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = CompactionRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("compact_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(compact_args) +compact_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [CompactionRequest, None], + None, + ), # 1 +) + + +class compact_result: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if 
oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("compact_result") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(compact_result) +compact_result.thrift_spec = () + + +class compact2_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = CompactionRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("compact2_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(compact2_args) +compact2_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [CompactionRequest, None], + None, + ), # 1 +) + + +class compact2_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = CompactionResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("compact2_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for 
key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(compact2_result) +compact2_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [CompactionResponse, None], + None, + ), # 0 +) + + +class show_compact_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = ShowCompactRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("show_compact_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(show_compact_args) +show_compact_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [ShowCompactRequest, None], + None, + ), # 1 +) + + +class show_compact_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ShowCompactResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("show_compact_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, 
other): + return not (self == other) + + +all_structs.append(show_compact_result) +show_compact_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [ShowCompactResponse, None], + None, + ), # 0 +) + + +class add_dynamic_partitions_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = AddDynamicPartitions() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_dynamic_partitions_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_dynamic_partitions_args) +add_dynamic_partitions_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [AddDynamicPartitions, None], + None, + ), # 1 +) + + +class add_dynamic_partitions_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_dynamic_partitions_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) 
and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_dynamic_partitions_result) +add_dynamic_partitions_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchTxnException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [TxnAbortedException, None], + None, + ), # 2 +) + + +class find_next_compact_args: + """ + Attributes: + - workerId + + """ + + def __init__( + self, + workerId=None, + ): + self.workerId = workerId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.workerId = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("find_next_compact_args") + if self.workerId is not None: + oprot.writeFieldBegin("workerId", TType.STRING, 1) + oprot.writeString(self.workerId.encode("utf-8") if sys.version_info[0] == 2 else self.workerId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(find_next_compact_args) +find_next_compact_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "workerId", + "UTF8", + None, + ), # 1 +) + + +class find_next_compact_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = OptionalCompactionInfoStruct() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("find_next_compact_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + 
oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(find_next_compact_result) +find_next_compact_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [OptionalCompactionInfoStruct, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class find_next_compact2_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = FindNextCompactRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("find_next_compact2_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(find_next_compact2_args) +find_next_compact2_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [FindNextCompactRequest, None], + None, + ), # 1 +) + + +class find_next_compact2_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = OptionalCompactionInfoStruct() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("find_next_compact2_result") + if self.success is not None: + 
oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(find_next_compact2_result) +find_next_compact2_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [OptionalCompactionInfoStruct, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class update_compactor_state_args: + """ + Attributes: + - cr + - txn_id + + """ + + def __init__( + self, + cr=None, + txn_id=None, + ): + self.cr = cr + self.txn_id = txn_id + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.cr = CompactionInfoStruct() + self.cr.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.txn_id = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_compactor_state_args") + if self.cr is not None: + oprot.writeFieldBegin("cr", TType.STRUCT, 1) + self.cr.write(oprot) + oprot.writeFieldEnd() + if self.txn_id is not None: + oprot.writeFieldBegin("txn_id", TType.I64, 2) + oprot.writeI64(self.txn_id) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_compactor_state_args) +update_compactor_state_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "cr", + [CompactionInfoStruct, None], + None, + ), # 1 + ( + 2, + TType.I64, + "txn_id", + None, + None, + ), # 2 +) + + +class update_compactor_state_result: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, 
[self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_compactor_state_result") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_compactor_state_result) +update_compactor_state_result.thrift_spec = () + + +class find_columns_with_stats_args: + """ + Attributes: + - cr + + """ + + def __init__( + self, + cr=None, + ): + self.cr = cr + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.cr = CompactionInfoStruct() + self.cr.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("find_columns_with_stats_args") + if self.cr is not None: + oprot.writeFieldBegin("cr", TType.STRUCT, 1) + self.cr.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(find_columns_with_stats_args) +find_columns_with_stats_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "cr", + [CompactionInfoStruct, None], + None, + ), # 1 +) + + +class find_columns_with_stats_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1768, _size1765) = iprot.readListBegin() + for _i1769 in range(_size1765): + _elem1770 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1770) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("find_columns_with_stats_result") + if self.success is not None: + 
oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1771 in self.success: + oprot.writeString(iter1771.encode("utf-8") if sys.version_info[0] == 2 else iter1771) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(find_columns_with_stats_result) +find_columns_with_stats_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 +) + + +class mark_cleaned_args: + """ + Attributes: + - cr + + """ + + def __init__( + self, + cr=None, + ): + self.cr = cr + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.cr = CompactionInfoStruct() + self.cr.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("mark_cleaned_args") + if self.cr is not None: + oprot.writeFieldBegin("cr", TType.STRUCT, 1) + self.cr.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(mark_cleaned_args) +mark_cleaned_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "cr", + [CompactionInfoStruct, None], + None, + ), # 1 +) + + +class mark_cleaned_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("mark_cleaned_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + 
oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(mark_cleaned_result) +mark_cleaned_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class mark_compacted_args: + """ + Attributes: + - cr + + """ + + def __init__( + self, + cr=None, + ): + self.cr = cr + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.cr = CompactionInfoStruct() + self.cr.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("mark_compacted_args") + if self.cr is not None: + oprot.writeFieldBegin("cr", TType.STRUCT, 1) + self.cr.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(mark_compacted_args) +mark_compacted_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "cr", + [CompactionInfoStruct, None], + None, + ), # 1 +) + + +class mark_compacted_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("mark_compacted_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return 
isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(mark_compacted_result) +mark_compacted_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class mark_failed_args: + """ + Attributes: + - cr + + """ + + def __init__( + self, + cr=None, + ): + self.cr = cr + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.cr = CompactionInfoStruct() + self.cr.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("mark_failed_args") + if self.cr is not None: + oprot.writeFieldBegin("cr", TType.STRUCT, 1) + self.cr.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(mark_failed_args) +mark_failed_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "cr", + [CompactionInfoStruct, None], + None, + ), # 1 +) + + +class mark_failed_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("mark_failed_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(mark_failed_result) +mark_failed_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + 
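+# For orientation: the `*_args`/`*_result` classes in this generated module are the
+# per-RPC request/response envelopes that `ThriftHiveMetastore.Client` encodes and
+# decodes on the wire; application code never instantiates them directly, it calls
+# the matching client method and works with the `ttypes` structs (`LockRequest`,
+# `LockResponse`, and so on). A minimal sketch of how the lock/unlock wrappers above
+# are typically exercised through that client follows — the metastore endpoint and
+# the database/table names are placeholder assumptions, not values from this module:
+#
+#     import getpass
+#     import socket
+#
+#     from thrift.protocol import TBinaryProtocol
+#     from thrift.transport import TSocket, TTransport
+#
+#     from hive_metastore.ThriftHiveMetastore import Client
+#     from hive_metastore.ttypes import (
+#         LockComponent,
+#         LockLevel,
+#         LockRequest,
+#         LockState,
+#         LockType,
+#         UnlockRequest,
+#     )
+#
+#     # Placeholder endpoint: a Hive Metastore Thrift service on its default port.
+#     transport = TTransport.TBufferedTransport(TSocket.TSocket("localhost", 9083))
+#     client = Client(TBinaryProtocol.TBinaryProtocol(transport))
+#     transport.open()
+#
+#     # client.lock(...) serializes a lock_args struct, reads back a lock_result,
+#     # and re-raises its declared o1/o2 fields (NoSuchTxnException,
+#     # TxnAbortedException) as Python exceptions.
+#     component = LockComponent(
+#         level=LockLevel.TABLE,
+#         type=LockType.EXCLUSIVE,
+#         dbname="default",      # placeholder database
+#         tablename="my_table",  # placeholder table
+#     )
+#     request = LockRequest(
+#         component=[component],
+#         user=getpass.getuser(),
+#         hostname=socket.gethostname(),
+#     )
+#     response = client.lock(request)
+#     try:
+#         if response.state == LockState.ACQUIRED:
+#             pass  # safe to swap table metadata while the exclusive lock is held
+#     finally:
+#         # unlock_result carries no success payload; failures surface as
+#         # NoSuchLockException / TxnOpenException.
+#         client.unlock(UnlockRequest(lockid=response.lockid))
+#         transport.close()
+#
+# If `response.state` comes back `LockState.WAITING`, a caller would poll
+# `client.check_lock(CheckLockRequest(lockid=...))` and keep the lock alive with
+# `client.heartbeat(HeartbeatRequest(lockid=...))`, which map onto the
+# `check_lock_*` and `heartbeat_*` wrappers defined earlier in this module.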
+ +class mark_refused_args: + """ + Attributes: + - cr + + """ + + def __init__( + self, + cr=None, + ): + self.cr = cr + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.cr = CompactionInfoStruct() + self.cr.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("mark_refused_args") + if self.cr is not None: + oprot.writeFieldBegin("cr", TType.STRUCT, 1) + self.cr.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(mark_refused_args) +mark_refused_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "cr", + [CompactionInfoStruct, None], + None, + ), # 1 +) + + +class mark_refused_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("mark_refused_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(mark_refused_result) +mark_refused_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class update_compaction_metrics_data_args: + """ + Attributes: + - data + + """ + + def __init__( + self, + data=None, + ): + self.data = data + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec 
is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.data = CompactionMetricsDataStruct() + self.data.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_compaction_metrics_data_args") + if self.data is not None: + oprot.writeFieldBegin("data", TType.STRUCT, 1) + self.data.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_compaction_metrics_data_args) +update_compaction_metrics_data_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "data", + [CompactionMetricsDataStruct, None], + None, + ), # 1 +) + + +class update_compaction_metrics_data_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("update_compaction_metrics_data_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(update_compaction_metrics_data_result) +update_compaction_metrics_data_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class remove_compaction_metrics_data_args: + """ + Attributes: + - 
request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = CompactionMetricsDataRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("remove_compaction_metrics_data_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(remove_compaction_metrics_data_args) +remove_compaction_metrics_data_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [CompactionMetricsDataRequest, None], + None, + ), # 1 +) + + +class remove_compaction_metrics_data_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("remove_compaction_metrics_data_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(remove_compaction_metrics_data_result) +remove_compaction_metrics_data_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class set_hadoop_jobid_args: + """ + Attributes: + - jobId + - cq_id + + """ + + def __init__( + self, + jobId=None, + cq_id=None, + ): + self.jobId = 
jobId + self.cq_id = cq_id + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.jobId = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.cq_id = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("set_hadoop_jobid_args") + if self.jobId is not None: + oprot.writeFieldBegin("jobId", TType.STRING, 1) + oprot.writeString(self.jobId.encode("utf-8") if sys.version_info[0] == 2 else self.jobId) + oprot.writeFieldEnd() + if self.cq_id is not None: + oprot.writeFieldBegin("cq_id", TType.I64, 2) + oprot.writeI64(self.cq_id) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(set_hadoop_jobid_args) +set_hadoop_jobid_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "jobId", + "UTF8", + None, + ), # 1 + ( + 2, + TType.I64, + "cq_id", + None, + None, + ), # 2 +) + + +class set_hadoop_jobid_result: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("set_hadoop_jobid_result") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(set_hadoop_jobid_result) +set_hadoop_jobid_result.thrift_spec = () + + +class get_latest_committed_compaction_info_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, 
[self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = GetLatestCommittedCompactionInfoRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_latest_committed_compaction_info_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_latest_committed_compaction_info_args) +get_latest_committed_compaction_info_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [GetLatestCommittedCompactionInfoRequest, None], + None, + ), # 1 +) + + +class get_latest_committed_compaction_info_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetLatestCommittedCompactionInfoResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_latest_committed_compaction_info_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_latest_committed_compaction_info_result) +get_latest_committed_compaction_info_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetLatestCommittedCompactionInfoResponse, None], + None, + ), # 0 +) + + +class get_next_notification_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not 
None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = NotificationEventRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_next_notification_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_next_notification_args) +get_next_notification_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [NotificationEventRequest, None], + None, + ), # 1 +) + + +class get_next_notification_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = NotificationEventResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_next_notification_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_next_notification_result) +get_next_notification_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [NotificationEventResponse, None], + None, + ), # 0 +) + + +class get_current_notificationEventId_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + 
else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_current_notificationEventId_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_current_notificationEventId_args) +get_current_notificationEventId_args.thrift_spec = () + + +class get_current_notificationEventId_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = CurrentNotificationEventId() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_current_notificationEventId_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_current_notificationEventId_result) +get_current_notificationEventId_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [CurrentNotificationEventId, None], + None, + ), # 0 +) + + +class get_notification_events_count_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = NotificationEventsCountRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
oprot.writeStructBegin("get_notification_events_count_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_notification_events_count_args) +get_notification_events_count_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [NotificationEventsCountRequest, None], + None, + ), # 1 +) + + +class get_notification_events_count_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = NotificationEventsCountResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_notification_events_count_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_notification_events_count_result) +get_notification_events_count_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [NotificationEventsCountResponse, None], + None, + ), # 0 +) + + +class fire_listener_event_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = FireEventRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("fire_listener_event_args") + if 
self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(fire_listener_event_args) +fire_listener_event_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [FireEventRequest, None], + None, + ), # 1 +) + + +class fire_listener_event_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = FireEventResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("fire_listener_event_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(fire_listener_event_result) +fire_listener_event_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [FireEventResponse, None], + None, + ), # 0 +) + + +class flushCache_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("flushCache_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + 
+all_structs.append(flushCache_args) +flushCache_args.thrift_spec = () + + +class flushCache_result: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("flushCache_result") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(flushCache_result) +flushCache_result.thrift_spec = () + + +class add_write_notification_log_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = WriteNotificationLogRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_write_notification_log_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_write_notification_log_args) +add_write_notification_log_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [WriteNotificationLogRequest, None], + None, + ), # 1 +) + + +class add_write_notification_log_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == 
TType.STRUCT: + self.success = WriteNotificationLogResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_write_notification_log_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_write_notification_log_result) +add_write_notification_log_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WriteNotificationLogResponse, None], + None, + ), # 0 +) + + +class add_write_notification_log_in_batch_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = WriteNotificationLogBatchRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_write_notification_log_in_batch_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_write_notification_log_in_batch_args) +add_write_notification_log_in_batch_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [WriteNotificationLogBatchRequest, None], + None, + ), # 1 +) + + +class add_write_notification_log_in_batch_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + 
self.success = WriteNotificationLogBatchResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_write_notification_log_in_batch_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_write_notification_log_in_batch_result) +add_write_notification_log_in_batch_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WriteNotificationLogBatchResponse, None], + None, + ), # 0 +) + + +class cm_recycle_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = CmRecycleRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("cm_recycle_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(cm_recycle_args) +cm_recycle_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [CmRecycleRequest, None], + None, + ), # 1 +) + + +class cm_recycle_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = CmRecycleResponse() + self.success.read(iprot) + else: + 
iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("cm_recycle_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(cm_recycle_result) +cm_recycle_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [CmRecycleResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_file_metadata_by_expr_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetFileMetadataByExprRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_file_metadata_by_expr_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_file_metadata_by_expr_args) +get_file_metadata_by_expr_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [GetFileMetadataByExprRequest, None], + None, + ), # 1 +) + + +class get_file_metadata_by_expr_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() 
+ if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetFileMetadataByExprResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_file_metadata_by_expr_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_file_metadata_by_expr_result) +get_file_metadata_by_expr_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetFileMetadataByExprResult, None], + None, + ), # 0 +) + + +class get_file_metadata_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetFileMetadataRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_file_metadata_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_file_metadata_args) +get_file_metadata_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [GetFileMetadataRequest, None], + None, + ), # 1 +) + + +class get_file_metadata_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetFileMetadataResult() + 
self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_file_metadata_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_file_metadata_result) +get_file_metadata_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetFileMetadataResult, None], + None, + ), # 0 +) + + +class put_file_metadata_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = PutFileMetadataRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("put_file_metadata_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(put_file_metadata_args) +put_file_metadata_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [PutFileMetadataRequest, None], + None, + ), # 1 +) + + +class put_file_metadata_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = PutFileMetadataResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if 
oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("put_file_metadata_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(put_file_metadata_result) +put_file_metadata_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [PutFileMetadataResult, None], + None, + ), # 0 +) + + +class clear_file_metadata_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = ClearFileMetadataRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("clear_file_metadata_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(clear_file_metadata_args) +clear_file_metadata_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [ClearFileMetadataRequest, None], + None, + ), # 1 +) + + +class clear_file_metadata_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ClearFileMetadataResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
oprot.writeStructBegin("clear_file_metadata_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(clear_file_metadata_result) +clear_file_metadata_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [ClearFileMetadataResult, None], + None, + ), # 0 +) + + +class cache_file_metadata_args: + """ + Attributes: + - req + + """ + + def __init__( + self, + req=None, + ): + self.req = req + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = CacheFileMetadataRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("cache_file_metadata_args") + if self.req is not None: + oprot.writeFieldBegin("req", TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(cache_file_metadata_args) +cache_file_metadata_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "req", + [CacheFileMetadataRequest, None], + None, + ), # 1 +) + + +class cache_file_metadata_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = CacheFileMetadataResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("cache_file_metadata_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + 
self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(cache_file_metadata_result) +cache_file_metadata_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [CacheFileMetadataResult, None], + None, + ), # 0 +) + + +class get_metastore_db_uuid_args: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_metastore_db_uuid_args") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_metastore_db_uuid_args) +get_metastore_db_uuid_args.thrift_spec = () + + +class get_metastore_db_uuid_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_metastore_db_uuid_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRING, 0) + oprot.writeString(self.success.encode("utf-8") if sys.version_info[0] == 2 else self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, 
value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_metastore_db_uuid_result) +get_metastore_db_uuid_result.thrift_spec = ( + ( + 0, + TType.STRING, + "success", + "UTF8", + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class create_resource_plan_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMCreateResourcePlanRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_resource_plan_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_resource_plan_args) +create_resource_plan_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMCreateResourcePlanRequest, None], + None, + ), # 1 +) + + +class create_resource_plan_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMCreateResourcePlanResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: +
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_resource_plan_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_resource_plan_result) +create_resource_plan_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMCreateResourcePlanResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class get_resource_plan_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMGetResourcePlanRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_resource_plan_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_resource_plan_args) +get_resource_plan_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMGetResourcePlanRequest, None], + None, + ), # 1 +) + + +class get_resource_plan_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + 
and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMGetResourcePlanResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_resource_plan_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_resource_plan_result) +get_resource_plan_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMGetResourcePlanResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_active_resource_plan_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMGetActiveResourcePlanRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_active_resource_plan_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def 
__ne__(self, other): + return not (self == other) + + +all_structs.append(get_active_resource_plan_args) +get_active_resource_plan_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMGetActiveResourcePlanRequest, None], + None, + ), # 1 +) + + +class get_active_resource_plan_result: + """ + Attributes: + - success + - o2 + + """ + + def __init__( + self, + success=None, + o2=None, + ): + self.success = success + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMGetActiveResourcePlanResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_active_resource_plan_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 1) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_active_resource_plan_result) +get_active_resource_plan_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMGetActiveResourcePlanResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 1 +) + + +class get_all_resource_plans_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMGetAllResourcePlanRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_resource_plans_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + 
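+        # Generated structs perform no field validation; validate() is a Thrift hook that always succeeds here.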
return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_resource_plans_args) +get_all_resource_plans_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMGetAllResourcePlanRequest, None], + None, + ), # 1 +) + + +class get_all_resource_plans_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMGetAllResourcePlanResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_resource_plans_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_resource_plans_result) +get_all_resource_plans_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMGetAllResourcePlanResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class alter_resource_plan_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMAlterResourcePlanRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
oprot.writeStructBegin("alter_resource_plan_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_resource_plan_args) +alter_resource_plan_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMAlterResourcePlanRequest, None], + None, + ), # 1 +) + + +class alter_resource_plan_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMAlterResourcePlanResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_resource_plan_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_resource_plan_result) +alter_resource_plan_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMAlterResourcePlanResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, 
None], + None, + ), # 3 +) + + +class validate_resource_plan_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMValidateResourcePlanRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("validate_resource_plan_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(validate_resource_plan_args) +validate_resource_plan_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMValidateResourcePlanRequest, None], + None, + ), # 1 +) + + +class validate_resource_plan_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMValidateResourcePlanResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("validate_resource_plan_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = 
["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(validate_resource_plan_result) +validate_resource_plan_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMValidateResourcePlanResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class drop_resource_plan_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMDropResourcePlanRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_resource_plan_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_resource_plan_args) +drop_resource_plan_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMDropResourcePlanRequest, None], + None, + ), # 1 +) + + +class drop_resource_plan_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMDropResourcePlanResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + 
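+        # Unrecognized field ids and mismatched wire types were skipped above, keeping this reader tolerant of newer schema revisions.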
iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_resource_plan_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_resource_plan_result) +drop_resource_plan_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMDropResourcePlanResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class create_wm_trigger_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMCreateTriggerRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_wm_trigger_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_wm_trigger_args) +create_wm_trigger_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMCreateTriggerRequest, None], + None, + ), # 1 +) + + +class create_wm_trigger_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = 
o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMCreateTriggerResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_wm_trigger_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_wm_trigger_result) +create_wm_trigger_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMCreateTriggerResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [InvalidObjectException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [MetaException, None], + None, + ), # 4 +) + + +class alter_wm_trigger_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMAlterTriggerRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + 
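+                    # Known field id but unexpected wire type: skip the value rather than failing the read.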
iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_wm_trigger_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_wm_trigger_args) +alter_wm_trigger_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMAlterTriggerRequest, None], + None, + ), # 1 +) + + +class alter_wm_trigger_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMAlterTriggerResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_wm_trigger_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_wm_trigger_result) +alter_wm_trigger_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMAlterTriggerResponse, None], + None, + ), # 0 + ( 
+ 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class drop_wm_trigger_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMDropTriggerRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_wm_trigger_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_wm_trigger_args) +drop_wm_trigger_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMDropTriggerRequest, None], + None, + ), # 1 +) + + +class drop_wm_trigger_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMDropTriggerResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_wm_trigger_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", 
TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_wm_trigger_result) +drop_wm_trigger_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMDropTriggerResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class get_triggers_for_resourceplan_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMGetTriggersForResourePlanRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_triggers_for_resourceplan_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_triggers_for_resourceplan_args) +get_triggers_for_resourceplan_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMGetTriggersForResourePlanRequest, None], + None, + ), # 1 +) + + +class get_triggers_for_resourceplan_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == 
TType.STRUCT: + self.success = WMGetTriggersForResourePlanResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_triggers_for_resourceplan_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_triggers_for_resourceplan_result) +get_triggers_for_resourceplan_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMGetTriggersForResourePlanResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class create_wm_pool_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMCreatePoolRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_wm_pool_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_wm_pool_args) +create_wm_pool_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMCreatePoolRequest, None], + None, + ), # 1 +) + + +class 
create_wm_pool_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMCreatePoolResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_wm_pool_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_wm_pool_result) +create_wm_pool_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMCreatePoolResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [InvalidObjectException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [MetaException, None], + None, + ), # 4 +) + + +class alter_wm_pool_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = 
iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMAlterPoolRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_wm_pool_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_wm_pool_args) +alter_wm_pool_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMAlterPoolRequest, None], + None, + ), # 1 +) + + +class alter_wm_pool_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMAlterPoolResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_wm_pool_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, 
value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_wm_pool_result) +alter_wm_pool_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMAlterPoolResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [InvalidObjectException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [MetaException, None], + None, + ), # 4 +) + + +class drop_wm_pool_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMDropPoolRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_wm_pool_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_wm_pool_args) +drop_wm_pool_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMDropPoolRequest, None], + None, + ), # 1 +) + + +class drop_wm_pool_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMDropPoolResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + 
iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_wm_pool_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_wm_pool_result) +drop_wm_pool_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMDropPoolResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class create_or_update_wm_mapping_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMCreateOrUpdateMappingRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_or_update_wm_mapping_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_or_update_wm_mapping_args) +create_or_update_wm_mapping_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMCreateOrUpdateMappingRequest, None], + None, + ), # 1 +) + + +class create_or_update_wm_mapping_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( 
+ self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMCreateOrUpdateMappingResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_or_update_wm_mapping_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_or_update_wm_mapping_result) +create_or_update_wm_mapping_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMCreateOrUpdateMappingResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [InvalidObjectException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [MetaException, None], + None, + ), # 4 +) + + +class drop_wm_mapping_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + 
if fid == 1: + if ftype == TType.STRUCT: + self.request = WMDropMappingRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_wm_mapping_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_wm_mapping_args) +drop_wm_mapping_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMDropMappingRequest, None], + None, + ), # 1 +) + + +class drop_wm_mapping_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMDropMappingResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_wm_mapping_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + 
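+# The structs above follow the usual Thrift-generated RPC shape: each service
+# method `foo` gets a `foo_args` struct holding its parameters and a
+# `foo_result` struct whose field 0 carries the return value and whose
+# remaining fields (o1, o2, ...) carry the declared exceptions; read()/write()
+# take the C fastbinary path when `_fast_decode`/`_fast_encode` are available
+# and otherwise fall back to the pure-Python field loops. A minimal round-trip
+# sketch, kept as a comment so it stays out of the vendored module (it assumes
+# only the `thrift` runtime package):
+#
+#   from thrift.transport import TTransport
+#   from thrift.protocol import TBinaryProtocol
+#
+#   buf = TTransport.TMemoryBuffer()
+#   drop_wm_mapping_args(request=WMDropMappingRequest()).write(
+#       TBinaryProtocol.TBinaryProtocol(buf))          # serialize
+#   args = drop_wm_mapping_args()
+#   args.read(TBinaryProtocol.TBinaryProtocol(         # deserialize a copy
+#       TTransport.TMemoryBuffer(buf.getvalue())))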
+all_structs.append(drop_wm_mapping_result) +drop_wm_mapping_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMDropMappingResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class create_or_drop_wm_trigger_to_pool_mapping_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = WMCreateOrDropTriggerToPoolMappingRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_or_drop_wm_trigger_to_pool_mapping_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_or_drop_wm_trigger_to_pool_mapping_args) +create_or_drop_wm_trigger_to_pool_mapping_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [WMCreateOrDropTriggerToPoolMappingRequest, None], + None, + ), # 1 +) + + +class create_or_drop_wm_trigger_to_pool_mapping_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = WMCreateOrDropTriggerToPoolMappingResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = MetaException.read(iprot) + else: + 
iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_or_drop_wm_trigger_to_pool_mapping_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_or_drop_wm_trigger_to_pool_mapping_result) +create_or_drop_wm_trigger_to_pool_mapping_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [WMCreateOrDropTriggerToPoolMappingResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [InvalidObjectException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [MetaException, None], + None, + ), # 4 +) + + +class create_ischema_args: + """ + Attributes: + - schema + + """ + + def __init__( + self, + schema=None, + ): + self.schema = schema + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.schema = ISchema() + self.schema.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_ischema_args") + if self.schema is not None: + oprot.writeFieldBegin("schema", TType.STRUCT, 1) + self.schema.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_ischema_args) +create_ischema_args.thrift_spec = ( + None, # 0 + ( + 
1, + TType.STRUCT, + "schema", + [ISchema, None], + None, + ), # 1 +) + + +class create_ischema_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_ischema_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_ischema_result) +create_ischema_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class alter_ischema_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = AlterISchemaRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_ischema_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + 
return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_ischema_args) +alter_ischema_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [AlterISchemaRequest, None], + None, + ), # 1 +) + + +class alter_ischema_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("alter_ischema_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(alter_ischema_result) +alter_ischema_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_ischema_args: + """ + Attributes: + - name + + """ + + def __init__( + self, + name=None, + ): + self.name = name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.name = ISchemaName() + self.name.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_ischema_args") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRUCT, 1) + self.name.write(oprot) + oprot.writeFieldEnd() + 
oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_ischema_args) +get_ischema_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "name", + [ISchemaName, None], + None, + ), # 1 +) + + +class get_ischema_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ISchema() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_ischema_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_ischema_result) +get_ischema_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [ISchema, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class drop_ischema_args: + """ + Attributes: + - name + + """ + + def __init__( + self, + name=None, + ): + self.name = name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.name = ISchemaName() + self.name.read(iprot) + else: + iprot.skip(ftype) + else: 
+ iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_ischema_args") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRUCT, 1) + self.name.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_ischema_args) +drop_ischema_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "name", + [ISchemaName, None], + None, + ), # 1 +) + + +class drop_ischema_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_ischema_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_ischema_result) +drop_ischema_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class add_schema_version_args: + """ + Attributes: + - schemaVersion + + """ + + def __init__( + self, + schemaVersion=None, + ): + self.schemaVersion = schemaVersion + + def read(self, iprot): + if ( + 
iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.schemaVersion = SchemaVersion() + self.schemaVersion.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_schema_version_args") + if self.schemaVersion is not None: + oprot.writeFieldBegin("schemaVersion", TType.STRUCT, 1) + self.schemaVersion.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_schema_version_args) +add_schema_version_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "schemaVersion", + [SchemaVersion, None], + None, + ), # 1 +) + + +class add_schema_version_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_schema_version_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + 
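+# `all_structs.append(...)` plus the `thrift_spec` assignments register each
+# struct so the spec tuples can be resolved for the optional fastbinary codec
+# (typically via `fix_spec(all_structs)` at the end of a generated module);
+# without the accelerator, the hand-rolled loops above do the work. A hedged
+# usage sketch against the generated `Client` in this module, where
+# "metastore-host" and the standard metastore port 9083 are placeholder
+# connection details:
+#
+#   from thrift.transport import TSocket, TTransport
+#   from thrift.protocol import TBinaryProtocol
+#
+#   transport = TTransport.TBufferedTransport(
+#       TSocket.TSocket("metastore-host", 9083))
+#   client = Client(TBinaryProtocol.TBinaryProtocol(transport))
+#   transport.open()
+#   client.add_schema_version(SchemaVersion())  # wraps add_schema_version_args
+#   transport.close()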
+ +all_structs.append(add_schema_version_result) +add_schema_version_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class get_schema_version_args: + """ + Attributes: + - schemaVersion + + """ + + def __init__( + self, + schemaVersion=None, + ): + self.schemaVersion = schemaVersion + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.schemaVersion = SchemaVersionDescriptor() + self.schemaVersion.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schema_version_args") + if self.schemaVersion is not None: + oprot.writeFieldBegin("schemaVersion", TType.STRUCT, 1) + self.schemaVersion.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schema_version_args) +get_schema_version_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "schemaVersion", + [SchemaVersionDescriptor, None], + None, + ), # 1 +) + + +class get_schema_version_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = SchemaVersion() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schema_version_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + 
oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schema_version_result) +get_schema_version_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [SchemaVersion, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_schema_latest_version_args: + """ + Attributes: + - schemaName + + """ + + def __init__( + self, + schemaName=None, + ): + self.schemaName = schemaName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.schemaName = ISchemaName() + self.schemaName.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schema_latest_version_args") + if self.schemaName is not None: + oprot.writeFieldBegin("schemaName", TType.STRUCT, 1) + self.schemaName.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schema_latest_version_args) +get_schema_latest_version_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "schemaName", + [ISchemaName, None], + None, + ), # 1 +) + + +class get_schema_latest_version_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = SchemaVersion() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + 
elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schema_latest_version_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schema_latest_version_result) +get_schema_latest_version_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [SchemaVersion, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_schema_all_versions_args: + """ + Attributes: + - schemaName + + """ + + def __init__( + self, + schemaName=None, + ): + self.schemaName = schemaName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.schemaName = ISchemaName() + self.schemaName.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schema_all_versions_args") + if self.schemaName is not None: + oprot.writeFieldBegin("schemaName", TType.STRUCT, 1) + self.schemaName.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schema_all_versions_args) +get_schema_all_versions_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "schemaName", + [ISchemaName, None], + None, + ), # 1 +) + + +class get_schema_all_versions_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, 
iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1775, _size1772) = iprot.readListBegin() + for _i1776 in range(_size1772): + _elem1777 = SchemaVersion() + _elem1777.read(iprot) + self.success.append(_elem1777) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schema_all_versions_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1778 in self.success: + iter1778.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schema_all_versions_result) +get_schema_all_versions_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [SchemaVersion, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class drop_schema_version_args: + """ + Attributes: + - schemaVersion + + """ + + def __init__( + self, + schemaVersion=None, + ): + self.schemaVersion = schemaVersion + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.schemaVersion = SchemaVersionDescriptor() + self.schemaVersion.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_schema_version_args") + if self.schemaVersion is not None: + oprot.writeFieldBegin("schemaVersion", TType.STRUCT, 1) + 
self.schemaVersion.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_schema_version_args) +drop_schema_version_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "schemaVersion", + [SchemaVersionDescriptor, None], + None, + ), # 1 +) + + +class drop_schema_version_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_schema_version_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_schema_version_result) +drop_schema_version_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_schemas_by_cols_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = FindSchemasByColsRqst() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, 
self.thrift_spec])) + return + oprot.writeStructBegin("get_schemas_by_cols_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schemas_by_cols_args) +get_schemas_by_cols_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [FindSchemasByColsRqst, None], + None, + ), # 1 +) + + +class get_schemas_by_cols_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = FindSchemasByColsResp() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_schemas_by_cols_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_schemas_by_cols_result) +get_schemas_by_cols_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [FindSchemasByColsResp, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class map_schema_version_to_serde_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = MapSchemaVersionToSerdeRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + 
iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("map_schema_version_to_serde_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(map_schema_version_to_serde_args) +map_schema_version_to_serde_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [MapSchemaVersionToSerdeRequest, None], + None, + ), # 1 +) + + +class map_schema_version_to_serde_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("map_schema_version_to_serde_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(map_schema_version_to_serde_result) +map_schema_version_to_serde_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class set_schema_version_state_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = 
iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = SetSchemaVersionStateRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("set_schema_version_state_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(set_schema_version_state_args) +set_schema_version_state_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [SetSchemaVersionStateRequest, None], + None, + ), # 1 +) + + +class set_schema_version_state_result: + """ + Attributes: + - o1 + - o2 + - o3 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("set_schema_version_state_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(set_schema_version_state_result) +set_schema_version_state_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, 
None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [MetaException, None], + None, + ), # 3 +) + + +class add_serde_args: + """ + Attributes: + - serde + + """ + + def __init__( + self, + serde=None, + ): + self.serde = serde + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.serde = SerDeInfo() + self.serde.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_serde_args") + if self.serde is not None: + oprot.writeFieldBegin("serde", TType.STRUCT, 1) + self.serde.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_serde_args) +add_serde_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "serde", + [SerDeInfo, None], + None, + ), # 1 +) + + +class add_serde_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_serde_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_serde_result) +add_serde_result.thrift_spec = ( + None, # 0 + ( + 1, + 
TType.STRUCT, + "o1", + [AlreadyExistsException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_serde_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = GetSerdeRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_serde_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_serde_args) +get_serde_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [GetSerdeRequest, None], + None, + ), # 1 +) + + +class get_serde_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = SerDeInfo() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_serde_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, 
value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_serde_result) +get_serde_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [SerDeInfo, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_lock_materialization_rebuild_args: + """ + Attributes: + - dbName + - tableName + - txnId + + """ + + def __init__( + self, + dbName=None, + tableName=None, + txnId=None, + ): + self.dbName = dbName + self.tableName = tableName + self.txnId = txnId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_lock_materialization_rebuild_args") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 2) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin("txnId", TType.I64, 3) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_lock_materialization_rebuild_args) +get_lock_materialization_rebuild_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I64, + "txnId", + None, + None, + ), # 3 +) + + +class get_lock_materialization_rebuild_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and 
isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = LockResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_lock_materialization_rebuild_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_lock_materialization_rebuild_result) +get_lock_materialization_rebuild_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [LockResponse, None], + None, + ), # 0 +) + + +class heartbeat_lock_materialization_rebuild_args: + """ + Attributes: + - dbName + - tableName + - txnId + + """ + + def __init__( + self, + dbName=None, + tableName=None, + txnId=None, + ): + self.dbName = dbName + self.tableName = tableName + self.txnId = txnId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("heartbeat_lock_materialization_rebuild_args") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 2) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin("txnId", TType.I64, 3) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + oprot.writeFieldStop() 
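+        # Annotation: writeFieldStop() emits the TType.STOP marker that terminates
+        # the struct's field list; writeStructEnd() then closes the struct frame.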
+ oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(heartbeat_lock_materialization_rebuild_args) +heartbeat_lock_materialization_rebuild_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I64, + "txnId", + None, + None, + ), # 3 +) + + +class heartbeat_lock_materialization_rebuild_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("heartbeat_lock_materialization_rebuild_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(heartbeat_lock_materialization_rebuild_result) +heartbeat_lock_materialization_rebuild_result.thrift_spec = ( + ( + 0, + TType.BOOL, + "success", + None, + None, + ), # 0 +) + + +class add_runtime_stats_args: + """ + Attributes: + - stat + + """ + + def __init__( + self, + stat=None, + ): + self.stat = stat + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.stat = RuntimeStat() + self.stat.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_runtime_stats_args") + if self.stat is not None: + oprot.writeFieldBegin("stat", TType.STRUCT, 1) + self.stat.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + 
oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_runtime_stats_args) +add_runtime_stats_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "stat", + [RuntimeStat, None], + None, + ), # 1 +) + + +class add_runtime_stats_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_runtime_stats_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_runtime_stats_result) +add_runtime_stats_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_runtime_stats_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = GetRuntimeStatsRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_runtime_stats_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, 
other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_runtime_stats_args) +get_runtime_stats_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [GetRuntimeStatsRequest, None], + None, + ), # 1 +) + + +class get_runtime_stats_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1782, _size1779) = iprot.readListBegin() + for _i1783 in range(_size1779): + _elem1784 = RuntimeStat() + _elem1784.read(iprot) + self.success.append(_elem1784) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_runtime_stats_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1785 in self.success: + iter1785.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_runtime_stats_result) +get_runtime_stats_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [RuntimeStat, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_partitions_with_specs_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GetPartitionsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + 
return + oprot.writeStructBegin("get_partitions_with_specs_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_with_specs_args) +get_partitions_with_specs_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [GetPartitionsRequest, None], + None, + ), # 1 +) + + +class get_partitions_with_specs_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetPartitionsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_partitions_with_specs_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_partitions_with_specs_result) +get_partitions_with_specs_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetPartitionsResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class scheduled_query_poll_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = ScheduledQueryPollRequest() + self.request.read(iprot) + else: + 
iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("scheduled_query_poll_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(scheduled_query_poll_args) +scheduled_query_poll_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [ScheduledQueryPollRequest, None], + None, + ), # 1 +) + + +class scheduled_query_poll_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ScheduledQueryPollResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("scheduled_query_poll_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(scheduled_query_poll_result) +scheduled_query_poll_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [ScheduledQueryPollResponse, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class scheduled_query_maintenance_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + 
return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = ScheduledQueryMaintenanceRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("scheduled_query_maintenance_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(scheduled_query_maintenance_args) +scheduled_query_maintenance_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [ScheduledQueryMaintenanceRequest, None], + None, + ), # 1 +) + + +class scheduled_query_maintenance_result: + """ + Attributes: + - o1 + - o2 + - o3 + - o4 + + """ + + def __init__( + self, + o1=None, + o2=None, + o3=None, + o4=None, + ): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = AlreadyExistsException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("scheduled_query_maintenance_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin("o3", TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin("o4", TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return 
isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(scheduled_query_maintenance_result) +scheduled_query_maintenance_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "o3", + [AlreadyExistsException, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "o4", + [InvalidInputException, None], + None, + ), # 4 +) + + +class scheduled_query_progress_args: + """ + Attributes: + - info + + """ + + def __init__( + self, + info=None, + ): + self.info = info + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.info = ScheduledQueryProgressInfo() + self.info.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("scheduled_query_progress_args") + if self.info is not None: + oprot.writeFieldBegin("info", TType.STRUCT, 1) + self.info.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(scheduled_query_progress_args) +scheduled_query_progress_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "info", + [ScheduledQueryProgressInfo, None], + None, + ), # 1 +) + + +class scheduled_query_progress_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("scheduled_query_progress_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + 
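+            # Annotation: fields left as None are simply omitted from the wire;
+            # generated writers only encode a field when it is set.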
oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(scheduled_query_progress_result) +scheduled_query_progress_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [InvalidOperationException, None], + None, + ), # 2 +) + + +class get_scheduled_query_args: + """ + Attributes: + - scheduleKey + + """ + + def __init__( + self, + scheduleKey=None, + ): + self.scheduleKey = scheduleKey + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.scheduleKey = ScheduledQueryKey() + self.scheduleKey.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_scheduled_query_args") + if self.scheduleKey is not None: + oprot.writeFieldBegin("scheduleKey", TType.STRUCT, 1) + self.scheduleKey.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_scheduled_query_args) +get_scheduled_query_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "scheduleKey", + [ScheduledQueryKey, None], + None, + ), # 1 +) + + +class get_scheduled_query_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ScheduledQuery() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + 
iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_scheduled_query_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_scheduled_query_result) +get_scheduled_query_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [ScheduledQuery, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class add_replication_metrics_args: + """ + Attributes: + - replicationMetricList + + """ + + def __init__( + self, + replicationMetricList=None, + ): + self.replicationMetricList = replicationMetricList + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.replicationMetricList = ReplicationMetricList() + self.replicationMetricList.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_replication_metrics_args") + if self.replicationMetricList is not None: + oprot.writeFieldBegin("replicationMetricList", TType.STRUCT, 1) + self.replicationMetricList.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_replication_metrics_args) +add_replication_metrics_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "replicationMetricList", + [ReplicationMetricList, None], + None, + ), # 1 +) + + +class add_replication_metrics_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is 
not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_replication_metrics_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_replication_metrics_result) +add_replication_metrics_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_replication_metrics_args: + """ + Attributes: + - rqst + + """ + + def __init__( + self, + rqst=None, + ): + self.rqst = rqst + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = GetReplicationMetricsRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_replication_metrics_args") + if self.rqst is not None: + oprot.writeFieldBegin("rqst", TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_replication_metrics_args) +get_replication_metrics_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "rqst", + [GetReplicationMetricsRequest, None], + None, + ), # 1 +) + + +class get_replication_metrics_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + 
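+        # Annotation: standard generated decode loop — read (name, type, id)
+        # triples until TType.STOP, dispatch on the field id, and skip any
+        # unknown or type-mismatched field for forward compatibility.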
while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ReplicationMetricList() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_replication_metrics_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_replication_metrics_result) +get_replication_metrics_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [ReplicationMetricList, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_open_txns_req_args: + """ + Attributes: + - getOpenTxnsRequest + + """ + + def __init__( + self, + getOpenTxnsRequest=None, + ): + self.getOpenTxnsRequest = getOpenTxnsRequest + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.getOpenTxnsRequest = GetOpenTxnsRequest() + self.getOpenTxnsRequest.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_open_txns_req_args") + if self.getOpenTxnsRequest is not None: + oprot.writeFieldBegin("getOpenTxnsRequest", TType.STRUCT, 1) + self.getOpenTxnsRequest.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_open_txns_req_args) +get_open_txns_req_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "getOpenTxnsRequest", + [GetOpenTxnsRequest, None], + None, + ), # 1 +) + + +class get_open_txns_req_result: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + 
self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetOpenTxnsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_open_txns_req_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_open_txns_req_result) +get_open_txns_req_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [GetOpenTxnsResponse, None], + None, + ), # 0 +) + + +class create_stored_procedure_args: + """ + Attributes: + - proc + + """ + + def __init__( + self, + proc=None, + ): + self.proc = proc + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.proc = StoredProcedure() + self.proc.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_stored_procedure_args") + if self.proc is not None: + oprot.writeFieldBegin("proc", TType.STRUCT, 1) + self.proc.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_stored_procedure_args) +create_stored_procedure_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "proc", + [StoredProcedure, None], + None, + ), # 1 +) + + +class create_stored_procedure_result: + """ + Attributes: + - o1 + - o2 + + """ + + def __init__( + self, + o1=None, + o2=None, + ): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, 
TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("create_stored_procedure_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(create_stored_procedure_result) +create_stored_procedure_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [NoSuchObjectException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [MetaException, None], + None, + ), # 2 +) + + +class get_stored_procedure_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = StoredProcedureRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_stored_procedure_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_stored_procedure_args) +get_stored_procedure_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [StoredProcedureRequest, None], + None, + ), # 1 +) + + +class get_stored_procedure_result: + """ + Attributes: + - success + - o1 
+ - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = StoredProcedure() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_stored_procedure_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_stored_procedure_result) +get_stored_procedure_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [StoredProcedure, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class drop_stored_procedure_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = StoredProcedureRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_stored_procedure_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = 
["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_stored_procedure_args) +drop_stored_procedure_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [StoredProcedureRequest, None], + None, + ), # 1 +) + + +class drop_stored_procedure_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_stored_procedure_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_stored_procedure_result) +drop_stored_procedure_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_all_stored_procedures_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = ListStoredProcedureRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_stored_procedures_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, 
other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_stored_procedures_args) +get_all_stored_procedures_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [ListStoredProcedureRequest, None], + None, + ), # 1 +) + + +class get_all_stored_procedures_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1789, _size1786) = iprot.readListBegin() + for _i1790 in range(_size1786): + _elem1791 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1791) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_stored_procedures_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1792 in self.success: + oprot.writeString(iter1792.encode("utf-8") if sys.version_info[0] == 2 else iter1792) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_stored_procedures_result) +get_all_stored_procedures_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class find_package_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GetPackageRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def 
write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("find_package_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(find_package_args) +find_package_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [GetPackageRequest, None], + None, + ), # 1 +) + + +class find_package_result: + """ + Attributes: + - success + - o1 + - o2 + + """ + + def __init__( + self, + success=None, + o1=None, + o2=None, + ): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Package() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("find_package_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin("o2", TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(find_package_result) +find_package_result.thrift_spec = ( + ( + 0, + TType.STRUCT, + "success", + [Package, None], + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "o2", + [NoSuchObjectException, None], + None, + ), # 2 +) + + +class add_package_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, 
TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = AddPackageRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_package_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_package_args) +add_package_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [AddPackageRequest, None], + None, + ), # 1 +) + + +class add_package_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("add_package_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(add_package_result) +add_package_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_all_packages_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == 
TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = ListPackageRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_packages_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_packages_args) +get_all_packages_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [ListPackageRequest, None], + None, + ), # 1 +) + + +class get_all_packages_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1796, _size1793) = iprot.readListBegin() + for _i1797 in range(_size1793): + _elem1798 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.success.append(_elem1798) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_packages_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter1799 in self.success: + oprot.writeString(iter1799.encode("utf-8") if sys.version_info[0] == 2 else iter1799) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_packages_result) +get_all_packages_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRING, "UTF8", False), + None, + ), # 0 
+ ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class drop_package_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = DropPackageRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_package_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_package_args) +drop_package_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [DropPackageRequest, None], + None, + ), # 1 +) + + +class drop_package_result: + """ + Attributes: + - o1 + + """ + + def __init__( + self, + o1=None, + ): + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("drop_package_result") + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(drop_package_result) +drop_package_result.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) + + +class get_all_write_event_info_args: + """ + Attributes: + - request + + """ + + def __init__( + self, + request=None, + ): + self.request = request + + def read(self, 
iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GetAllWriteEventInfoRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_write_event_info_args") + if self.request is not None: + oprot.writeFieldBegin("request", TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_write_event_info_args) +get_all_write_event_info_args.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "request", + [GetAllWriteEventInfoRequest, None], + None, + ), # 1 +) + + +class get_all_write_event_info_result: + """ + Attributes: + - success + - o1 + + """ + + def __init__( + self, + success=None, + o1=None, + ): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype1803, _size1800) = iprot.readListBegin() + for _i1804 in range(_size1800): + _elem1805 = WriteEventInfo() + _elem1805.read(iprot) + self.success.append(_elem1805) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("get_all_write_event_info_result") + if self.success is not None: + oprot.writeFieldBegin("success", TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter1806 in self.success: + iter1806.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin("o1", TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == 
other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(get_all_write_event_info_result) +get_all_write_event_info_result.thrift_spec = ( + ( + 0, + TType.LIST, + "success", + (TType.STRUCT, [WriteEventInfo, None], False), + None, + ), # 0 + ( + 1, + TType.STRUCT, + "o1", + [MetaException, None], + None, + ), # 1 +) +fix_spec(all_structs) +del all_structs diff --git a/vendor/hive_metastore/__init__.py b/vendor/hive_metastore/__init__.py new file mode 100644 index 0000000000..178d664d81 --- /dev/null +++ b/vendor/hive_metastore/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +__all__ = ["ttypes", "constants", "ThriftHiveMetastore"] diff --git a/vendor/hive_metastore/constants.py b/vendor/hive_metastore/constants.py new file mode 100644 index 0000000000..218e527553 --- /dev/null +++ b/vendor/hive_metastore/constants.py @@ -0,0 +1,66 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# +# Autogenerated by Thrift Compiler (0.16.0) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# +# options string: py +# + + + + +DDL_TIME = "transient_lastDdlTime" +ACCESSTYPE_NONE = 1 +ACCESSTYPE_READONLY = 2 +ACCESSTYPE_WRITEONLY = 4 +ACCESSTYPE_READWRITE = 8 +HIVE_FILTER_FIELD_OWNER = "hive_filter_field_owner__" +HIVE_FILTER_FIELD_PARAMS = "hive_filter_field_params__" +HIVE_FILTER_FIELD_LAST_ACCESS = "hive_filter_field_last_access__" +IS_ARCHIVED = "is_archived" +ORIGINAL_LOCATION = "original_location" +IS_IMMUTABLE = "immutable" +META_TABLE_COLUMNS = "columns" +META_TABLE_COLUMN_TYPES = "columns.types" +BUCKET_FIELD_NAME = "bucket_field_name" +BUCKET_COUNT = "bucket_count" +FIELD_TO_DIMENSION = "field_to_dimension" +META_TABLE_NAME = "name" +META_TABLE_DB = "db" +META_TABLE_LOCATION = "location" +META_TABLE_SERDE = "serde" +META_TABLE_PARTITION_COLUMNS = "partition_columns" +META_TABLE_PARTITION_COLUMN_TYPES = "partition_columns.types" +FILE_INPUT_FORMAT = "file.inputformat" +FILE_OUTPUT_FORMAT = "file.outputformat" +META_TABLE_STORAGE = "storage_handler" +TABLE_IS_TRANSACTIONAL = "transactional" +TABLE_NO_AUTO_COMPACT = "no_auto_compaction" +TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties" +TABLE_BUCKETING_VERSION = "bucketing_version" +DRUID_CONFIG_PREFIX = "druid." +JDBC_CONFIG_PREFIX = "hive.sql." +TABLE_IS_CTAS = "created_with_ctas" +TABLE_IS_CTLT = "created_with_ctlt" +PARTITION_TRANSFORM_SPEC = "partition_transform_spec" +NO_CLEANUP = "no_cleanup" +CTAS_LEGACY_CONFIG = "create_table_as_external" +DEFAULT_TABLE_TYPE = "defaultTableType" +TXN_ID = "txnId" +WRITE_ID = "writeId" diff --git a/vendor/hive_metastore/ttypes.py b/vendor/hive_metastore/ttypes.py new file mode 100644 index 0000000000..dca7aaadc7 --- /dev/null +++ b/vendor/hive_metastore/ttypes.py @@ -0,0 +1,42515 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# +# Autogenerated by Thrift Compiler (0.16.0) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# +# options string: py +# + +import sys + +from thrift.protocol.TProtocol import TProtocolException +from thrift.Thrift import ( + TException, + TType, +) +from thrift.transport import TTransport +from thrift.TRecursive import fix_spec + +all_structs = [] + + +class HiveObjectType: + GLOBAL = 1 + DATABASE = 2 + TABLE = 3 + PARTITION = 4 + COLUMN = 5 + DATACONNECTOR = 6 + + _VALUES_TO_NAMES = { + 1: "GLOBAL", + 2: "DATABASE", + 3: "TABLE", + 4: "PARTITION", + 5: "COLUMN", + 6: "DATACONNECTOR", + } + + _NAMES_TO_VALUES = { + "GLOBAL": 1, + "DATABASE": 2, + "TABLE": 3, + "PARTITION": 4, + "COLUMN": 5, + "DATACONNECTOR": 6, + } + + +class PrincipalType: + USER = 1 + ROLE = 2 + GROUP = 3 + + _VALUES_TO_NAMES = { + 1: "USER", + 2: "ROLE", + 3: "GROUP", + } + + _NAMES_TO_VALUES = { + "USER": 1, + "ROLE": 2, + "GROUP": 3, + } + + +class PartitionEventType: + LOAD_DONE = 1 + + _VALUES_TO_NAMES = { + 1: "LOAD_DONE", + } + + _NAMES_TO_VALUES = { + "LOAD_DONE": 1, + } + + +class TxnState: + COMMITTED = 1 + ABORTED = 2 + OPEN = 3 + + _VALUES_TO_NAMES = { + 1: "COMMITTED", + 2: "ABORTED", + 3: "OPEN", + } + + _NAMES_TO_VALUES = { + "COMMITTED": 1, + "ABORTED": 2, + "OPEN": 3, + } + + +class LockLevel: + DB = 1 + TABLE = 2 + PARTITION = 3 + + _VALUES_TO_NAMES = { + 1: "DB", + 2: "TABLE", + 3: "PARTITION", + } + + _NAMES_TO_VALUES = { + "DB": 1, + "TABLE": 2, + "PARTITION": 3, + } + + +class LockState: + ACQUIRED = 1 + WAITING = 2 + ABORT = 3 + NOT_ACQUIRED = 4 + + _VALUES_TO_NAMES = { + 1: "ACQUIRED", + 2: "WAITING", + 3: "ABORT", + 4: "NOT_ACQUIRED", + } + + _NAMES_TO_VALUES = { + "ACQUIRED": 1, + "WAITING": 2, + "ABORT": 3, + "NOT_ACQUIRED": 4, + } + + +class LockType: + SHARED_READ = 1 + SHARED_WRITE = 2 + EXCLUSIVE = 3 + EXCL_WRITE = 4 + + _VALUES_TO_NAMES = { + 1: "SHARED_READ", + 2: "SHARED_WRITE", + 3: "EXCLUSIVE", + 4: "EXCL_WRITE", + } + + _NAMES_TO_VALUES = { + "SHARED_READ": 1, + "SHARED_WRITE": 2, + "EXCLUSIVE": 3, + "EXCL_WRITE": 4, + } + + +class CompactionType: + MINOR = 1 + MAJOR = 2 + + _VALUES_TO_NAMES = { + 1: "MINOR", + 2: "MAJOR", + } + + _NAMES_TO_VALUES = { + "MINOR": 1, + "MAJOR": 2, + } + + +class GrantRevokeType: + GRANT = 1 + REVOKE = 2 + + _VALUES_TO_NAMES = { + 1: "GRANT", + 2: "REVOKE", + } + + _NAMES_TO_VALUES = { + "GRANT": 1, + "REVOKE": 2, + } + + +class DataOperationType: + SELECT = 1 + INSERT = 2 + UPDATE = 3 + DELETE = 4 + UNSET = 5 + NO_TXN = 6 + + _VALUES_TO_NAMES = { + 1: "SELECT", + 2: "INSERT", + 3: "UPDATE", + 4: "DELETE", + 5: "UNSET", + 6: "NO_TXN", + } + + _NAMES_TO_VALUES = { + "SELECT": 1, + "INSERT": 2, + "UPDATE": 3, + "DELETE": 4, + "UNSET": 5, + "NO_TXN": 6, + } + + +class EventRequestType: + INSERT = 1 + UPDATE = 2 + DELETE = 3 + + _VALUES_TO_NAMES = { + 1: "INSERT", + 2: "UPDATE", + 3: "DELETE", + } + + _NAMES_TO_VALUES = { + "INSERT": 1, + "UPDATE": 2, + "DELETE": 3, + } + + +class SerdeType: + HIVE = 1 + SCHEMA_REGISTRY = 2 + + _VALUES_TO_NAMES = { + 1: "HIVE", + 2: "SCHEMA_REGISTRY", + } + + _NAMES_TO_VALUES = { + "HIVE": 1, + "SCHEMA_REGISTRY": 2, + } + + +class SchemaType: + HIVE = 1 + AVRO = 2 + + _VALUES_TO_NAMES = { + 1: "HIVE", + 2: "AVRO", + } + + _NAMES_TO_VALUES = { + "HIVE": 1, + "AVRO": 2, + } + + +class SchemaCompatibility: + NONE = 1 + BACKWARD = 2 + FORWARD = 3 + BOTH = 4 + + _VALUES_TO_NAMES = { + 1: "NONE", + 2: "BACKWARD", + 3: "FORWARD", + 4: "BOTH", + } + + _NAMES_TO_VALUES = { + "NONE": 1, + "BACKWARD": 2, + 
"FORWARD": 3, + "BOTH": 4, + } + + +class SchemaValidation: + LATEST = 1 + ALL = 2 + + _VALUES_TO_NAMES = { + 1: "LATEST", + 2: "ALL", + } + + _NAMES_TO_VALUES = { + "LATEST": 1, + "ALL": 2, + } + + +class SchemaVersionState: + INITIATED = 1 + START_REVIEW = 2 + CHANGES_REQUIRED = 3 + REVIEWED = 4 + ENABLED = 5 + DISABLED = 6 + ARCHIVED = 7 + DELETED = 8 + + _VALUES_TO_NAMES = { + 1: "INITIATED", + 2: "START_REVIEW", + 3: "CHANGES_REQUIRED", + 4: "REVIEWED", + 5: "ENABLED", + 6: "DISABLED", + 7: "ARCHIVED", + 8: "DELETED", + } + + _NAMES_TO_VALUES = { + "INITIATED": 1, + "START_REVIEW": 2, + "CHANGES_REQUIRED": 3, + "REVIEWED": 4, + "ENABLED": 5, + "DISABLED": 6, + "ARCHIVED": 7, + "DELETED": 8, + } + + +class DatabaseType: + NATIVE = 1 + REMOTE = 2 + + _VALUES_TO_NAMES = { + 1: "NATIVE", + 2: "REMOTE", + } + + _NAMES_TO_VALUES = { + "NATIVE": 1, + "REMOTE": 2, + } + + +class FunctionType: + JAVA = 1 + + _VALUES_TO_NAMES = { + 1: "JAVA", + } + + _NAMES_TO_VALUES = { + "JAVA": 1, + } + + +class ResourceType: + JAR = 1 + FILE = 2 + ARCHIVE = 3 + + _VALUES_TO_NAMES = { + 1: "JAR", + 2: "FILE", + 3: "ARCHIVE", + } + + _NAMES_TO_VALUES = { + "JAR": 1, + "FILE": 2, + "ARCHIVE": 3, + } + + +class TxnType: + DEFAULT = 0 + REPL_CREATED = 1 + READ_ONLY = 2 + COMPACTION = 3 + MATER_VIEW_REBUILD = 4 + SOFT_DELETE = 5 + + _VALUES_TO_NAMES = { + 0: "DEFAULT", + 1: "REPL_CREATED", + 2: "READ_ONLY", + 3: "COMPACTION", + 4: "MATER_VIEW_REBUILD", + 5: "SOFT_DELETE", + } + + _NAMES_TO_VALUES = { + "DEFAULT": 0, + "REPL_CREATED": 1, + "READ_ONLY": 2, + "COMPACTION": 3, + "MATER_VIEW_REBUILD": 4, + "SOFT_DELETE": 5, + } + + +class GetTablesExtRequestFields: + ACCESS_TYPE = 1 + PROCESSOR_CAPABILITIES = 2 + ALL = 2147483647 + + _VALUES_TO_NAMES = { + 1: "ACCESS_TYPE", + 2: "PROCESSOR_CAPABILITIES", + 2147483647: "ALL", + } + + _NAMES_TO_VALUES = { + "ACCESS_TYPE": 1, + "PROCESSOR_CAPABILITIES": 2, + "ALL": 2147483647, + } + + +class CompactionMetricsMetricType: + NUM_OBSOLETE_DELTAS = 0 + NUM_DELTAS = 1 + NUM_SMALL_DELTAS = 2 + + _VALUES_TO_NAMES = { + 0: "NUM_OBSOLETE_DELTAS", + 1: "NUM_DELTAS", + 2: "NUM_SMALL_DELTAS", + } + + _NAMES_TO_VALUES = { + "NUM_OBSOLETE_DELTAS": 0, + "NUM_DELTAS": 1, + "NUM_SMALL_DELTAS": 2, + } + + +class FileMetadataExprType: + ORC_SARG = 1 + + _VALUES_TO_NAMES = { + 1: "ORC_SARG", + } + + _NAMES_TO_VALUES = { + "ORC_SARG": 1, + } + + +class ClientCapability: + TEST_CAPABILITY = 1 + INSERT_ONLY_TABLES = 2 + + _VALUES_TO_NAMES = { + 1: "TEST_CAPABILITY", + 2: "INSERT_ONLY_TABLES", + } + + _NAMES_TO_VALUES = { + "TEST_CAPABILITY": 1, + "INSERT_ONLY_TABLES": 2, + } + + +class WMResourcePlanStatus: + ACTIVE = 1 + ENABLED = 2 + DISABLED = 3 + + _VALUES_TO_NAMES = { + 1: "ACTIVE", + 2: "ENABLED", + 3: "DISABLED", + } + + _NAMES_TO_VALUES = { + "ACTIVE": 1, + "ENABLED": 2, + "DISABLED": 3, + } + + +class WMPoolSchedulingPolicy: + FAIR = 1 + FIFO = 2 + + _VALUES_TO_NAMES = { + 1: "FAIR", + 2: "FIFO", + } + + _NAMES_TO_VALUES = { + "FAIR": 1, + "FIFO": 2, + } + + +class ScheduledQueryMaintenanceRequestType: + CREATE = 1 + ALTER = 2 + DROP = 3 + + _VALUES_TO_NAMES = { + 1: "CREATE", + 2: "ALTER", + 3: "DROP", + } + + _NAMES_TO_VALUES = { + "CREATE": 1, + "ALTER": 2, + "DROP": 3, + } + + +class QueryState: + INITED = 0 + EXECUTING = 1 + FAILED = 2 + FINISHED = 3 + TIMED_OUT = 4 + AUTO_DISABLED = 5 + + _VALUES_TO_NAMES = { + 0: "INITED", + 1: "EXECUTING", + 2: "FAILED", + 3: "FINISHED", + 4: "TIMED_OUT", + 5: "AUTO_DISABLED", + } + + _NAMES_TO_VALUES = { + "INITED": 0, + "EXECUTING": 1, + 
"FAILED": 2, + "FINISHED": 3, + "TIMED_OUT": 4, + "AUTO_DISABLED": 5, + } + + +class PartitionFilterMode: + BY_NAMES = 0 + BY_VALUES = 1 + BY_EXPR = 2 + + _VALUES_TO_NAMES = { + 0: "BY_NAMES", + 1: "BY_VALUES", + 2: "BY_EXPR", + } + + _NAMES_TO_VALUES = { + "BY_NAMES": 0, + "BY_VALUES": 1, + "BY_EXPR": 2, + } + + +class Version: + """ + Attributes: + - version + - comments + + """ + + def __init__( + self, + version=None, + comments=None, + ): + self.version = version + self.comments = comments + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.version = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.comments = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Version") + if self.version is not None: + oprot.writeFieldBegin("version", TType.STRING, 1) + oprot.writeString(self.version.encode("utf-8") if sys.version_info[0] == 2 else self.version) + oprot.writeFieldEnd() + if self.comments is not None: + oprot.writeFieldBegin("comments", TType.STRING, 2) + oprot.writeString(self.comments.encode("utf-8") if sys.version_info[0] == 2 else self.comments) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class FieldSchema: + """ + Attributes: + - name + - type + - comment + + """ + + def __init__( + self, + name=None, + type=None, + comment=None, + ): + self.name = name + self.type = type + self.comment = comment + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.type = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.comment = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + 
) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("FieldSchema") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.STRING, 2) + oprot.writeString(self.type.encode("utf-8") if sys.version_info[0] == 2 else self.type) + oprot.writeFieldEnd() + if self.comment is not None: + oprot.writeFieldBegin("comment", TType.STRING, 3) + oprot.writeString(self.comment.encode("utf-8") if sys.version_info[0] == 2 else self.comment) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class EnvironmentContext: + """ + Attributes: + - properties + + """ + + def __init__( + self, + properties=None, + ): + self.properties = properties + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.MAP: + self.properties = {} + (_ktype1, _vtype2, _size0) = iprot.readMapBegin() + for _i4 in range(_size0): + _key5 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val6 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.properties[_key5] = _val6 + iprot.readMapEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("EnvironmentContext") + if self.properties is not None: + oprot.writeFieldBegin("properties", TType.MAP, 1) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) + for kiter7, viter8 in self.properties.items(): + oprot.writeString(kiter7.encode("utf-8") if sys.version_info[0] == 2 else kiter7) + oprot.writeString(viter8.encode("utf-8") if sys.version_info[0] == 2 else viter8) + oprot.writeMapEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SQLPrimaryKey: + """ + Attributes: + - table_db + - table_name 
+ - column_name + - key_seq + - pk_name + - enable_cstr + - validate_cstr + - rely_cstr + - catName + + """ + + def __init__( + self, + table_db=None, + table_name=None, + column_name=None, + key_seq=None, + pk_name=None, + enable_cstr=None, + validate_cstr=None, + rely_cstr=None, + catName=None, + ): + self.table_db = table_db + self.table_name = table_name + self.column_name = column_name + self.key_seq = key_seq + self.pk_name = pk_name + self.enable_cstr = enable_cstr + self.validate_cstr = validate_cstr + self.rely_cstr = rely_cstr + self.catName = catName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.table_db = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.column_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.key_seq = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.pk_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.BOOL: + self.enable_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.validate_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.rely_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SQLPrimaryKey") + if self.table_db is not None: + oprot.writeFieldBegin("table_db", TType.STRING, 1) + oprot.writeString(self.table_db.encode("utf-8") if sys.version_info[0] == 2 else self.table_db) + oprot.writeFieldEnd() + if self.table_name is not None: + oprot.writeFieldBegin("table_name", TType.STRING, 2) + oprot.writeString(self.table_name.encode("utf-8") if sys.version_info[0] == 2 else self.table_name) + oprot.writeFieldEnd() + if self.column_name is not None: + oprot.writeFieldBegin("column_name", TType.STRING, 3) + oprot.writeString(self.column_name.encode("utf-8") if sys.version_info[0] == 2 else self.column_name) + oprot.writeFieldEnd() + if self.key_seq is not None: + oprot.writeFieldBegin("key_seq", TType.I32, 4) + oprot.writeI32(self.key_seq) + oprot.writeFieldEnd() + if self.pk_name is not None: + oprot.writeFieldBegin("pk_name", TType.STRING, 5) + 
oprot.writeString(self.pk_name.encode("utf-8") if sys.version_info[0] == 2 else self.pk_name) + oprot.writeFieldEnd() + if self.enable_cstr is not None: + oprot.writeFieldBegin("enable_cstr", TType.BOOL, 6) + oprot.writeBool(self.enable_cstr) + oprot.writeFieldEnd() + if self.validate_cstr is not None: + oprot.writeFieldBegin("validate_cstr", TType.BOOL, 7) + oprot.writeBool(self.validate_cstr) + oprot.writeFieldEnd() + if self.rely_cstr is not None: + oprot.writeFieldBegin("rely_cstr", TType.BOOL, 8) + oprot.writeBool(self.rely_cstr) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 9) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SQLForeignKey: + """ + Attributes: + - pktable_db + - pktable_name + - pkcolumn_name + - fktable_db + - fktable_name + - fkcolumn_name + - key_seq + - update_rule + - delete_rule + - fk_name + - pk_name + - enable_cstr + - validate_cstr + - rely_cstr + - catName + + """ + + def __init__( + self, + pktable_db=None, + pktable_name=None, + pkcolumn_name=None, + fktable_db=None, + fktable_name=None, + fkcolumn_name=None, + key_seq=None, + update_rule=None, + delete_rule=None, + fk_name=None, + pk_name=None, + enable_cstr=None, + validate_cstr=None, + rely_cstr=None, + catName=None, + ): + self.pktable_db = pktable_db + self.pktable_name = pktable_name + self.pkcolumn_name = pkcolumn_name + self.fktable_db = fktable_db + self.fktable_name = fktable_name + self.fkcolumn_name = fkcolumn_name + self.key_seq = key_seq + self.update_rule = update_rule + self.delete_rule = delete_rule + self.fk_name = fk_name + self.pk_name = pk_name + self.enable_cstr = enable_cstr + self.validate_cstr = validate_cstr + self.rely_cstr = rely_cstr + self.catName = catName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.pktable_db = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.pktable_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.pkcolumn_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.fktable_db = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.fktable_name = ( + iprot.readString().decode("utf-8", 
errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.fkcolumn_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.key_seq = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.I32: + self.update_rule = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.I32: + self.delete_rule = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.fk_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.STRING: + self.pk_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 12: + if ftype == TType.BOOL: + self.enable_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 13: + if ftype == TType.BOOL: + self.validate_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 14: + if ftype == TType.BOOL: + self.rely_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 15: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SQLForeignKey") + if self.pktable_db is not None: + oprot.writeFieldBegin("pktable_db", TType.STRING, 1) + oprot.writeString(self.pktable_db.encode("utf-8") if sys.version_info[0] == 2 else self.pktable_db) + oprot.writeFieldEnd() + if self.pktable_name is not None: + oprot.writeFieldBegin("pktable_name", TType.STRING, 2) + oprot.writeString(self.pktable_name.encode("utf-8") if sys.version_info[0] == 2 else self.pktable_name) + oprot.writeFieldEnd() + if self.pkcolumn_name is not None: + oprot.writeFieldBegin("pkcolumn_name", TType.STRING, 3) + oprot.writeString(self.pkcolumn_name.encode("utf-8") if sys.version_info[0] == 2 else self.pkcolumn_name) + oprot.writeFieldEnd() + if self.fktable_db is not None: + oprot.writeFieldBegin("fktable_db", TType.STRING, 4) + oprot.writeString(self.fktable_db.encode("utf-8") if sys.version_info[0] == 2 else self.fktable_db) + oprot.writeFieldEnd() + if self.fktable_name is not None: + oprot.writeFieldBegin("fktable_name", TType.STRING, 5) + oprot.writeString(self.fktable_name.encode("utf-8") if sys.version_info[0] == 2 else self.fktable_name) + oprot.writeFieldEnd() + if self.fkcolumn_name is not None: + oprot.writeFieldBegin("fkcolumn_name", TType.STRING, 6) + oprot.writeString(self.fkcolumn_name.encode("utf-8") if sys.version_info[0] == 2 else self.fkcolumn_name) + oprot.writeFieldEnd() + if self.key_seq is not None: + oprot.writeFieldBegin("key_seq", TType.I32, 7) + oprot.writeI32(self.key_seq) + oprot.writeFieldEnd() + if self.update_rule is not None: + oprot.writeFieldBegin("update_rule", TType.I32, 8) + oprot.writeI32(self.update_rule) + oprot.writeFieldEnd() + if self.delete_rule is not None: + 
oprot.writeFieldBegin("delete_rule", TType.I32, 9) + oprot.writeI32(self.delete_rule) + oprot.writeFieldEnd() + if self.fk_name is not None: + oprot.writeFieldBegin("fk_name", TType.STRING, 10) + oprot.writeString(self.fk_name.encode("utf-8") if sys.version_info[0] == 2 else self.fk_name) + oprot.writeFieldEnd() + if self.pk_name is not None: + oprot.writeFieldBegin("pk_name", TType.STRING, 11) + oprot.writeString(self.pk_name.encode("utf-8") if sys.version_info[0] == 2 else self.pk_name) + oprot.writeFieldEnd() + if self.enable_cstr is not None: + oprot.writeFieldBegin("enable_cstr", TType.BOOL, 12) + oprot.writeBool(self.enable_cstr) + oprot.writeFieldEnd() + if self.validate_cstr is not None: + oprot.writeFieldBegin("validate_cstr", TType.BOOL, 13) + oprot.writeBool(self.validate_cstr) + oprot.writeFieldEnd() + if self.rely_cstr is not None: + oprot.writeFieldBegin("rely_cstr", TType.BOOL, 14) + oprot.writeBool(self.rely_cstr) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 15) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SQLUniqueConstraint: + """ + Attributes: + - catName + - table_db + - table_name + - column_name + - key_seq + - uk_name + - enable_cstr + - validate_cstr + - rely_cstr + + """ + + def __init__( + self, + catName=None, + table_db=None, + table_name=None, + column_name=None, + key_seq=None, + uk_name=None, + enable_cstr=None, + validate_cstr=None, + rely_cstr=None, + ): + self.catName = catName + self.table_db = table_db + self.table_name = table_name + self.column_name = column_name + self.key_seq = key_seq + self.uk_name = uk_name + self.enable_cstr = enable_cstr + self.validate_cstr = validate_cstr + self.rely_cstr = rely_cstr + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.table_db = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.column_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.key_seq = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == 
TType.STRING: + self.uk_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.enable_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.validate_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.BOOL: + self.rely_cstr = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SQLUniqueConstraint") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.table_db is not None: + oprot.writeFieldBegin("table_db", TType.STRING, 2) + oprot.writeString(self.table_db.encode("utf-8") if sys.version_info[0] == 2 else self.table_db) + oprot.writeFieldEnd() + if self.table_name is not None: + oprot.writeFieldBegin("table_name", TType.STRING, 3) + oprot.writeString(self.table_name.encode("utf-8") if sys.version_info[0] == 2 else self.table_name) + oprot.writeFieldEnd() + if self.column_name is not None: + oprot.writeFieldBegin("column_name", TType.STRING, 4) + oprot.writeString(self.column_name.encode("utf-8") if sys.version_info[0] == 2 else self.column_name) + oprot.writeFieldEnd() + if self.key_seq is not None: + oprot.writeFieldBegin("key_seq", TType.I32, 5) + oprot.writeI32(self.key_seq) + oprot.writeFieldEnd() + if self.uk_name is not None: + oprot.writeFieldBegin("uk_name", TType.STRING, 6) + oprot.writeString(self.uk_name.encode("utf-8") if sys.version_info[0] == 2 else self.uk_name) + oprot.writeFieldEnd() + if self.enable_cstr is not None: + oprot.writeFieldBegin("enable_cstr", TType.BOOL, 7) + oprot.writeBool(self.enable_cstr) + oprot.writeFieldEnd() + if self.validate_cstr is not None: + oprot.writeFieldBegin("validate_cstr", TType.BOOL, 8) + oprot.writeBool(self.validate_cstr) + oprot.writeFieldEnd() + if self.rely_cstr is not None: + oprot.writeFieldBegin("rely_cstr", TType.BOOL, 9) + oprot.writeBool(self.rely_cstr) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SQLNotNullConstraint: + """ + Attributes: + - catName + - table_db + - table_name + - column_name + - nn_name + - enable_cstr + - validate_cstr + - rely_cstr + + """ + + def __init__( + self, + catName=None, + table_db=None, + table_name=None, + column_name=None, + nn_name=None, + enable_cstr=None, + validate_cstr=None, + rely_cstr=None, + ): + self.catName = catName + self.table_db = table_db + self.table_name = table_name + self.column_name = column_name + self.nn_name = nn_name + self.enable_cstr = enable_cstr + self.validate_cstr = validate_cstr + self.rely_cstr = rely_cstr + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, 
TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.table_db = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.column_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.nn_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.BOOL: + self.enable_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.validate_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.rely_cstr = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SQLNotNullConstraint") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.table_db is not None: + oprot.writeFieldBegin("table_db", TType.STRING, 2) + oprot.writeString(self.table_db.encode("utf-8") if sys.version_info[0] == 2 else self.table_db) + oprot.writeFieldEnd() + if self.table_name is not None: + oprot.writeFieldBegin("table_name", TType.STRING, 3) + oprot.writeString(self.table_name.encode("utf-8") if sys.version_info[0] == 2 else self.table_name) + oprot.writeFieldEnd() + if self.column_name is not None: + oprot.writeFieldBegin("column_name", TType.STRING, 4) + oprot.writeString(self.column_name.encode("utf-8") if sys.version_info[0] == 2 else self.column_name) + oprot.writeFieldEnd() + if self.nn_name is not None: + oprot.writeFieldBegin("nn_name", TType.STRING, 5) + oprot.writeString(self.nn_name.encode("utf-8") if sys.version_info[0] == 2 else self.nn_name) + oprot.writeFieldEnd() + if self.enable_cstr is not None: + oprot.writeFieldBegin("enable_cstr", TType.BOOL, 6) + oprot.writeBool(self.enable_cstr) + oprot.writeFieldEnd() + if self.validate_cstr is not None: + oprot.writeFieldBegin("validate_cstr", TType.BOOL, 7) + oprot.writeBool(self.validate_cstr) + oprot.writeFieldEnd() + if self.rely_cstr is not None: + oprot.writeFieldBegin("rely_cstr", TType.BOOL, 8) + oprot.writeBool(self.rely_cstr) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = 
["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SQLDefaultConstraint: + """ + Attributes: + - catName + - table_db + - table_name + - column_name + - default_value + - dc_name + - enable_cstr + - validate_cstr + - rely_cstr + + """ + + def __init__( + self, + catName=None, + table_db=None, + table_name=None, + column_name=None, + default_value=None, + dc_name=None, + enable_cstr=None, + validate_cstr=None, + rely_cstr=None, + ): + self.catName = catName + self.table_db = table_db + self.table_name = table_name + self.column_name = column_name + self.default_value = default_value + self.dc_name = dc_name + self.enable_cstr = enable_cstr + self.validate_cstr = validate_cstr + self.rely_cstr = rely_cstr + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.table_db = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.column_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.default_value = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.dc_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.enable_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.validate_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.BOOL: + self.rely_cstr = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SQLDefaultConstraint") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.table_db is not None: + oprot.writeFieldBegin("table_db", TType.STRING, 2) + oprot.writeString(self.table_db.encode("utf-8") if 
sys.version_info[0] == 2 else self.table_db) + oprot.writeFieldEnd() + if self.table_name is not None: + oprot.writeFieldBegin("table_name", TType.STRING, 3) + oprot.writeString(self.table_name.encode("utf-8") if sys.version_info[0] == 2 else self.table_name) + oprot.writeFieldEnd() + if self.column_name is not None: + oprot.writeFieldBegin("column_name", TType.STRING, 4) + oprot.writeString(self.column_name.encode("utf-8") if sys.version_info[0] == 2 else self.column_name) + oprot.writeFieldEnd() + if self.default_value is not None: + oprot.writeFieldBegin("default_value", TType.STRING, 5) + oprot.writeString(self.default_value.encode("utf-8") if sys.version_info[0] == 2 else self.default_value) + oprot.writeFieldEnd() + if self.dc_name is not None: + oprot.writeFieldBegin("dc_name", TType.STRING, 6) + oprot.writeString(self.dc_name.encode("utf-8") if sys.version_info[0] == 2 else self.dc_name) + oprot.writeFieldEnd() + if self.enable_cstr is not None: + oprot.writeFieldBegin("enable_cstr", TType.BOOL, 7) + oprot.writeBool(self.enable_cstr) + oprot.writeFieldEnd() + if self.validate_cstr is not None: + oprot.writeFieldBegin("validate_cstr", TType.BOOL, 8) + oprot.writeBool(self.validate_cstr) + oprot.writeFieldEnd() + if self.rely_cstr is not None: + oprot.writeFieldBegin("rely_cstr", TType.BOOL, 9) + oprot.writeBool(self.rely_cstr) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SQLCheckConstraint: + """ + Attributes: + - catName + - table_db + - table_name + - column_name + - check_expression + - dc_name + - enable_cstr + - validate_cstr + - rely_cstr + + """ + + def __init__( + self, + catName=None, + table_db=None, + table_name=None, + column_name=None, + check_expression=None, + dc_name=None, + enable_cstr=None, + validate_cstr=None, + rely_cstr=None, + ): + self.catName = catName + self.table_db = table_db + self.table_name = table_name + self.column_name = column_name + self.check_expression = check_expression + self.dc_name = dc_name + self.enable_cstr = enable_cstr + self.validate_cstr = validate_cstr + self.rely_cstr = rely_cstr + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.table_db = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.table_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.column_name = ( + 
iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.check_expression = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.dc_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.enable_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.validate_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.BOOL: + self.rely_cstr = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SQLCheckConstraint") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.table_db is not None: + oprot.writeFieldBegin("table_db", TType.STRING, 2) + oprot.writeString(self.table_db.encode("utf-8") if sys.version_info[0] == 2 else self.table_db) + oprot.writeFieldEnd() + if self.table_name is not None: + oprot.writeFieldBegin("table_name", TType.STRING, 3) + oprot.writeString(self.table_name.encode("utf-8") if sys.version_info[0] == 2 else self.table_name) + oprot.writeFieldEnd() + if self.column_name is not None: + oprot.writeFieldBegin("column_name", TType.STRING, 4) + oprot.writeString(self.column_name.encode("utf-8") if sys.version_info[0] == 2 else self.column_name) + oprot.writeFieldEnd() + if self.check_expression is not None: + oprot.writeFieldBegin("check_expression", TType.STRING, 5) + oprot.writeString(self.check_expression.encode("utf-8") if sys.version_info[0] == 2 else self.check_expression) + oprot.writeFieldEnd() + if self.dc_name is not None: + oprot.writeFieldBegin("dc_name", TType.STRING, 6) + oprot.writeString(self.dc_name.encode("utf-8") if sys.version_info[0] == 2 else self.dc_name) + oprot.writeFieldEnd() + if self.enable_cstr is not None: + oprot.writeFieldBegin("enable_cstr", TType.BOOL, 7) + oprot.writeBool(self.enable_cstr) + oprot.writeFieldEnd() + if self.validate_cstr is not None: + oprot.writeFieldBegin("validate_cstr", TType.BOOL, 8) + oprot.writeBool(self.validate_cstr) + oprot.writeFieldEnd() + if self.rely_cstr is not None: + oprot.writeFieldBegin("rely_cstr", TType.BOOL, 9) + oprot.writeBool(self.rely_cstr) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SQLAllTableConstraints: + """ + Attributes: + - primaryKeys + - foreignKeys + - uniqueConstraints + - notNullConstraints + - defaultConstraints + - checkConstraints + + """ + + def __init__( + self, + 
primaryKeys=None, + foreignKeys=None, + uniqueConstraints=None, + notNullConstraints=None, + defaultConstraints=None, + checkConstraints=None, + ): + self.primaryKeys = primaryKeys + self.foreignKeys = foreignKeys + self.uniqueConstraints = uniqueConstraints + self.notNullConstraints = notNullConstraints + self.defaultConstraints = defaultConstraints + self.checkConstraints = checkConstraints + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.primaryKeys = [] + (_etype12, _size9) = iprot.readListBegin() + for _i13 in range(_size9): + _elem14 = SQLPrimaryKey() + _elem14.read(iprot) + self.primaryKeys.append(_elem14) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.foreignKeys = [] + (_etype18, _size15) = iprot.readListBegin() + for _i19 in range(_size15): + _elem20 = SQLForeignKey() + _elem20.read(iprot) + self.foreignKeys.append(_elem20) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.uniqueConstraints = [] + (_etype24, _size21) = iprot.readListBegin() + for _i25 in range(_size21): + _elem26 = SQLUniqueConstraint() + _elem26.read(iprot) + self.uniqueConstraints.append(_elem26) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.notNullConstraints = [] + (_etype30, _size27) = iprot.readListBegin() + for _i31 in range(_size27): + _elem32 = SQLNotNullConstraint() + _elem32.read(iprot) + self.notNullConstraints.append(_elem32) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.defaultConstraints = [] + (_etype36, _size33) = iprot.readListBegin() + for _i37 in range(_size33): + _elem38 = SQLDefaultConstraint() + _elem38.read(iprot) + self.defaultConstraints.append(_elem38) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.checkConstraints = [] + (_etype42, _size39) = iprot.readListBegin() + for _i43 in range(_size39): + _elem44 = SQLCheckConstraint() + _elem44.read(iprot) + self.checkConstraints.append(_elem44) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SQLAllTableConstraints") + if self.primaryKeys is not None: + oprot.writeFieldBegin("primaryKeys", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) + for iter45 in self.primaryKeys: + iter45.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.foreignKeys is not None: + oprot.writeFieldBegin("foreignKeys", TType.LIST, 2) + oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) + for iter46 in self.foreignKeys: + iter46.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.uniqueConstraints is not None: + oprot.writeFieldBegin("uniqueConstraints", TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) + for iter47 in self.uniqueConstraints: + iter47.write(oprot) + 
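# Struct-valued lists are framed as writeListBegin(TType.STRUCT, len) / one write() per element / +
# writeListEnd(); each constraint struct serializes itself, so the container never touches its fields. +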
oprot.writeListEnd() + oprot.writeFieldEnd() + if self.notNullConstraints is not None: + oprot.writeFieldBegin("notNullConstraints", TType.LIST, 4) + oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) + for iter48 in self.notNullConstraints: + iter48.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.defaultConstraints is not None: + oprot.writeFieldBegin("defaultConstraints", TType.LIST, 5) + oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) + for iter49 in self.defaultConstraints: + iter49.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.checkConstraints is not None: + oprot.writeFieldBegin("checkConstraints", TType.LIST, 6) + oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) + for iter50 in self.checkConstraints: + iter50.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Type: + """ + Attributes: + - name + - type1 + - type2 + - fields + + """ + + def __init__( + self, + name=None, + type1=None, + type2=None, + fields=None, + ): + self.name = name + self.type1 = type1 + self.type2 = type2 + self.fields = fields + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.type1 = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.type2 = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.fields = [] + (_etype54, _size51) = iprot.readListBegin() + for _i55 in range(_size51): + _elem56 = FieldSchema() + _elem56.read(iprot) + self.fields.append(_elem56) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Type") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.type1 is not None: + oprot.writeFieldBegin("type1", TType.STRING, 2) + oprot.writeString(self.type1.encode("utf-8") if sys.version_info[0] == 2 else self.type1) + oprot.writeFieldEnd() + if self.type2 is not None: + oprot.writeFieldBegin("type2", TType.STRING, 3) + 
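# Round-trip sketch (illustrative only; assumes the standard thrift-python TMemoryBuffer and +
# TBinaryProtocol, which this vendored module does not itself import): +
#     buf = TTransport.TMemoryBuffer() +
#     Type(name="decimal", type1="10", type2="2").write(TBinaryProtocol.TBinaryProtocol(buf)) +
#     echo = Type() +
#     echo.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue()))) +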
oprot.writeString(self.type2.encode("utf-8") if sys.version_info[0] == 2 else self.type2) + oprot.writeFieldEnd() + if self.fields is not None: + oprot.writeFieldBegin("fields", TType.LIST, 4) + oprot.writeListBegin(TType.STRUCT, len(self.fields)) + for iter57 in self.fields: + iter57.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class HiveObjectRef: + """ + Attributes: + - objectType + - dbName + - objectName + - partValues + - columnName + - catName + + """ + + def __init__( + self, + objectType=None, + dbName=None, + objectName=None, + partValues=None, + columnName=None, + catName=None, + ): + self.objectType = objectType + self.dbName = dbName + self.objectName = objectName + self.partValues = partValues + self.columnName = columnName + self.catName = catName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.objectType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.objectName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.partValues = [] + (_etype61, _size58) = iprot.readListBegin() + for _i62 in range(_size58): + _elem63 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partValues.append(_elem63) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.columnName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("HiveObjectRef") + if self.objectType is not None: + oprot.writeFieldBegin("objectType", TType.I32, 1) + oprot.writeI32(self.objectType) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.objectName is not None: + 
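# HiveObjectRef identifies the object a privilege attaches to: objectType is an i32 code, dbName and +
# objectName locate it, and the optional partValues (list<string>) plus columnName narrow the scope. +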
oprot.writeFieldBegin("objectName", TType.STRING, 3) + oprot.writeString(self.objectName.encode("utf-8") if sys.version_info[0] == 2 else self.objectName) + oprot.writeFieldEnd() + if self.partValues is not None: + oprot.writeFieldBegin("partValues", TType.LIST, 4) + oprot.writeListBegin(TType.STRING, len(self.partValues)) + for iter64 in self.partValues: + oprot.writeString(iter64.encode("utf-8") if sys.version_info[0] == 2 else iter64) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.columnName is not None: + oprot.writeFieldBegin("columnName", TType.STRING, 5) + oprot.writeString(self.columnName.encode("utf-8") if sys.version_info[0] == 2 else self.columnName) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 6) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PrivilegeGrantInfo: + """ + Attributes: + - privilege + - createTime + - grantor + - grantorType + - grantOption + + """ + + def __init__( + self, + privilege=None, + createTime=None, + grantor=None, + grantorType=None, + grantOption=None, + ): + self.privilege = privilege + self.createTime = createTime + self.grantor = grantor + self.grantorType = grantorType + self.grantOption = grantOption + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.privilege = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.createTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.grantor = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.grantorType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.BOOL: + self.grantOption = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PrivilegeGrantInfo") + if self.privilege is not None: + oprot.writeFieldBegin("privilege", TType.STRING, 1) + oprot.writeString(self.privilege.encode("utf-8") if sys.version_info[0] == 2 else self.privilege) + oprot.writeFieldEnd() + if self.createTime is not None: + oprot.writeFieldBegin("createTime", TType.I32, 2) + oprot.writeI32(self.createTime) + oprot.writeFieldEnd() + if self.grantor is not None: + 
oprot.writeFieldBegin("grantor", TType.STRING, 3) + oprot.writeString(self.grantor.encode("utf-8") if sys.version_info[0] == 2 else self.grantor) + oprot.writeFieldEnd() + if self.grantorType is not None: + oprot.writeFieldBegin("grantorType", TType.I32, 4) + oprot.writeI32(self.grantorType) + oprot.writeFieldEnd() + if self.grantOption is not None: + oprot.writeFieldBegin("grantOption", TType.BOOL, 5) + oprot.writeBool(self.grantOption) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class HiveObjectPrivilege: + """ + Attributes: + - hiveObject + - principalName + - principalType + - grantInfo + - authorizer + + """ + + def __init__( + self, + hiveObject=None, + principalName=None, + principalType=None, + grantInfo=None, + authorizer=None, + ): + self.hiveObject = hiveObject + self.principalName = principalName + self.principalType = principalType + self.grantInfo = grantInfo + self.authorizer = authorizer + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.hiveObject = HiveObjectRef() + self.hiveObject.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.principalName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.principalType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.grantInfo = PrivilegeGrantInfo() + self.grantInfo.read(iprot) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.authorizer = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("HiveObjectPrivilege") + if self.hiveObject is not None: + oprot.writeFieldBegin("hiveObject", TType.STRUCT, 1) + self.hiveObject.write(oprot) + oprot.writeFieldEnd() + if self.principalName is not None: + oprot.writeFieldBegin("principalName", TType.STRING, 2) + oprot.writeString(self.principalName.encode("utf-8") if sys.version_info[0] == 2 else self.principalName) + oprot.writeFieldEnd() + if self.principalType is not None: + oprot.writeFieldBegin("principalType", TType.I32, 3) + oprot.writeI32(self.principalType) + oprot.writeFieldEnd() + if self.grantInfo is not None: + oprot.writeFieldBegin("grantInfo", TType.STRUCT, 4) + self.grantInfo.write(oprot) + oprot.writeFieldEnd() + if self.authorizer is not None: + oprot.writeFieldBegin("authorizer", TType.STRING, 5) + 
oprot.writeString(self.authorizer.encode("utf-8") if sys.version_info[0] == 2 else self.authorizer) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PrivilegeBag: + """ + Attributes: + - privileges + + """ + + def __init__( + self, + privileges=None, + ): + self.privileges = privileges + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.privileges = [] + (_etype68, _size65) = iprot.readListBegin() + for _i69 in range(_size65): + _elem70 = HiveObjectPrivilege() + _elem70.read(iprot) + self.privileges.append(_elem70) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PrivilegeBag") + if self.privileges is not None: + oprot.writeFieldBegin("privileges", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.privileges)) + for iter71 in self.privileges: + iter71.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PrincipalPrivilegeSet: + """ + Attributes: + - userPrivileges + - groupPrivileges + - rolePrivileges + + """ + + def __init__( + self, + userPrivileges=None, + groupPrivileges=None, + rolePrivileges=None, + ): + self.userPrivileges = userPrivileges + self.groupPrivileges = groupPrivileges + self.rolePrivileges = rolePrivileges + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.MAP: + self.userPrivileges = {} + (_ktype73, _vtype74, _size72) = iprot.readMapBegin() + for _i76 in range(_size72): + _key77 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val78 = [] + (_etype82, _size79) = iprot.readListBegin() + for _i83 in range(_size79): + _elem84 = PrivilegeGrantInfo() + _elem84.read(iprot) + _val78.append(_elem84) + iprot.readListEnd() + self.userPrivileges[_key77] = _val78 + iprot.readMapEnd() + else: + 
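# Type mismatch on a known field id: skip() consumes and discards the value so decoding can +
# continue, which lets older readers tolerate schema drift from newer metastore servers. +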
iprot.skip(ftype) + elif fid == 2: + if ftype == TType.MAP: + self.groupPrivileges = {} + (_ktype86, _vtype87, _size85) = iprot.readMapBegin() + for _i89 in range(_size85): + _key90 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val91 = [] + (_etype95, _size92) = iprot.readListBegin() + for _i96 in range(_size92): + _elem97 = PrivilegeGrantInfo() + _elem97.read(iprot) + _val91.append(_elem97) + iprot.readListEnd() + self.groupPrivileges[_key90] = _val91 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.MAP: + self.rolePrivileges = {} + (_ktype99, _vtype100, _size98) = iprot.readMapBegin() + for _i102 in range(_size98): + _key103 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val104 = [] + (_etype108, _size105) = iprot.readListBegin() + for _i109 in range(_size105): + _elem110 = PrivilegeGrantInfo() + _elem110.read(iprot) + _val104.append(_elem110) + iprot.readListEnd() + self.rolePrivileges[_key103] = _val104 + iprot.readMapEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PrincipalPrivilegeSet") + if self.userPrivileges is not None: + oprot.writeFieldBegin("userPrivileges", TType.MAP, 1) + oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.userPrivileges)) + for kiter111, viter112 in self.userPrivileges.items(): + oprot.writeString(kiter111.encode("utf-8") if sys.version_info[0] == 2 else kiter111) + oprot.writeListBegin(TType.STRUCT, len(viter112)) + for iter113 in viter112: + iter113.write(oprot) + oprot.writeListEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.groupPrivileges is not None: + oprot.writeFieldBegin("groupPrivileges", TType.MAP, 2) + oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.groupPrivileges)) + for kiter114, viter115 in self.groupPrivileges.items(): + oprot.writeString(kiter114.encode("utf-8") if sys.version_info[0] == 2 else kiter114) + oprot.writeListBegin(TType.STRUCT, len(viter115)) + for iter116 in viter115: + iter116.write(oprot) + oprot.writeListEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.rolePrivileges is not None: + oprot.writeFieldBegin("rolePrivileges", TType.MAP, 3) + oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.rolePrivileges)) + for kiter117, viter118 in self.rolePrivileges.items(): + oprot.writeString(kiter117.encode("utf-8") if sys.version_info[0] == 2 else kiter117) + oprot.writeListBegin(TType.STRUCT, len(viter118)) + for iter119 in viter118: + iter119.write(oprot) + oprot.writeListEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GrantRevokePrivilegeRequest: + """ + Attributes: + - requestType + - privileges + - revokeGrantOption + + """ + + def __init__( + self, + requestType=None, + privileges=None, + revokeGrantOption=None, 
+ ): + self.requestType = requestType + self.privileges = privileges + self.revokeGrantOption = revokeGrantOption + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.requestType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.privileges = PrivilegeBag() + self.privileges.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.revokeGrantOption = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GrantRevokePrivilegeRequest") + if self.requestType is not None: + oprot.writeFieldBegin("requestType", TType.I32, 1) + oprot.writeI32(self.requestType) + oprot.writeFieldEnd() + if self.privileges is not None: + oprot.writeFieldBegin("privileges", TType.STRUCT, 2) + self.privileges.write(oprot) + oprot.writeFieldEnd() + if self.revokeGrantOption is not None: + oprot.writeFieldBegin("revokeGrantOption", TType.BOOL, 3) + oprot.writeBool(self.revokeGrantOption) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GrantRevokePrivilegeResponse: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GrantRevokePrivilegeResponse") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 1) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class 
TruncateTableRequest: + """ + Attributes: + - dbName + - tableName + - partNames + - writeId + - validWriteIdList + - environmentContext + + """ + + def __init__( + self, + dbName=None, + tableName=None, + partNames=None, + writeId=-1, + validWriteIdList=None, + environmentContext=None, + ): + self.dbName = dbName + self.tableName = tableName + self.partNames = partNames + self.writeId = writeId + self.validWriteIdList = validWriteIdList + self.environmentContext = environmentContext + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.partNames = [] + (_etype123, _size120) = iprot.readListBegin() + for _i124 in range(_size120): + _elem125 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partNames.append(_elem125) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRUCT: + self.environmentContext = EnvironmentContext() + self.environmentContext.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("TruncateTableRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 2) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.partNames is not None: + oprot.writeFieldBegin("partNames", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.partNames)) + for iter126 in self.partNames: + oprot.writeString(iter126.encode("utf-8") if sys.version_info[0] == 2 else iter126) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin("writeId", TType.I64, 4) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 5) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.environmentContext is not None: + 
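# dbName and tableName are the only required fields; validate() below raises TProtocolException +
# when either is unset, while writeId quietly defaults to -1. +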
oprot.writeFieldBegin("environmentContext", TType.STRUCT, 6) + self.environmentContext.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tableName is None: + raise TProtocolException(message="Required field tableName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class TruncateTableResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("TruncateTableResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Role: + """ + Attributes: + - roleName + - createTime + - ownerName + + """ + + def __init__( + self, + roleName=None, + createTime=None, + ownerName=None, + ): + self.roleName = roleName + self.createTime = createTime + self.ownerName = ownerName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.roleName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.createTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.ownerName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Role") + if self.roleName is not None: + oprot.writeFieldBegin("roleName", TType.STRING, 1) + oprot.writeString(self.roleName.encode("utf-8") if sys.version_info[0] == 2 else self.roleName) + oprot.writeFieldEnd() + if self.createTime is not None: + 
oprot.writeFieldBegin("createTime", TType.I32, 2) + oprot.writeI32(self.createTime) + oprot.writeFieldEnd() + if self.ownerName is not None: + oprot.writeFieldBegin("ownerName", TType.STRING, 3) + oprot.writeString(self.ownerName.encode("utf-8") if sys.version_info[0] == 2 else self.ownerName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class RolePrincipalGrant: + """ + Attributes: + - roleName + - principalName + - principalType + - grantOption + - grantTime + - grantorName + - grantorPrincipalType + + """ + + def __init__( + self, + roleName=None, + principalName=None, + principalType=None, + grantOption=None, + grantTime=None, + grantorName=None, + grantorPrincipalType=None, + ): + self.roleName = roleName + self.principalName = principalName + self.principalType = principalType + self.grantOption = grantOption + self.grantTime = grantTime + self.grantorName = grantorName + self.grantorPrincipalType = grantorPrincipalType + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.roleName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.principalName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.principalType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.grantOption = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.grantTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.grantorName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.grantorPrincipalType = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("RolePrincipalGrant") + if self.roleName is not None: + oprot.writeFieldBegin("roleName", TType.STRING, 1) + oprot.writeString(self.roleName.encode("utf-8") if sys.version_info[0] == 2 else self.roleName) + oprot.writeFieldEnd() + if self.principalName is not None: + oprot.writeFieldBegin("principalName", TType.STRING, 2) + oprot.writeString(self.principalName.encode("utf-8") if sys.version_info[0] == 2 else self.principalName) + oprot.writeFieldEnd() + if self.principalType is not None: + 
oprot.writeFieldBegin("principalType", TType.I32, 3) + oprot.writeI32(self.principalType) + oprot.writeFieldEnd() + if self.grantOption is not None: + oprot.writeFieldBegin("grantOption", TType.BOOL, 4) + oprot.writeBool(self.grantOption) + oprot.writeFieldEnd() + if self.grantTime is not None: + oprot.writeFieldBegin("grantTime", TType.I32, 5) + oprot.writeI32(self.grantTime) + oprot.writeFieldEnd() + if self.grantorName is not None: + oprot.writeFieldBegin("grantorName", TType.STRING, 6) + oprot.writeString(self.grantorName.encode("utf-8") if sys.version_info[0] == 2 else self.grantorName) + oprot.writeFieldEnd() + if self.grantorPrincipalType is not None: + oprot.writeFieldBegin("grantorPrincipalType", TType.I32, 7) + oprot.writeI32(self.grantorPrincipalType) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetRoleGrantsForPrincipalRequest: + """ + Attributes: + - principal_name + - principal_type + + """ + + def __init__( + self, + principal_name=None, + principal_type=None, + ): + self.principal_name = principal_name + self.principal_type = principal_type + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.principal_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.principal_type = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetRoleGrantsForPrincipalRequest") + if self.principal_name is not None: + oprot.writeFieldBegin("principal_name", TType.STRING, 1) + oprot.writeString(self.principal_name.encode("utf-8") if sys.version_info[0] == 2 else self.principal_name) + oprot.writeFieldEnd() + if self.principal_type is not None: + oprot.writeFieldBegin("principal_type", TType.I32, 2) + oprot.writeI32(self.principal_type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.principal_name is None: + raise TProtocolException(message="Required field principal_name is unset!") + if self.principal_type is None: + raise TProtocolException(message="Required field principal_type is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class 
GetRoleGrantsForPrincipalResponse: + """ + Attributes: + - principalGrants + + """ + + def __init__( + self, + principalGrants=None, + ): + self.principalGrants = principalGrants + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.principalGrants = [] + (_etype130, _size127) = iprot.readListBegin() + for _i131 in range(_size127): + _elem132 = RolePrincipalGrant() + _elem132.read(iprot) + self.principalGrants.append(_elem132) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetRoleGrantsForPrincipalResponse") + if self.principalGrants is not None: + oprot.writeFieldBegin("principalGrants", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.principalGrants)) + for iter133 in self.principalGrants: + iter133.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.principalGrants is None: + raise TProtocolException(message="Required field principalGrants is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPrincipalsInRoleRequest: + """ + Attributes: + - roleName + + """ + + def __init__( + self, + roleName=None, + ): + self.roleName = roleName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.roleName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPrincipalsInRoleRequest") + if self.roleName is not None: + oprot.writeFieldBegin("roleName", TType.STRING, 1) + oprot.writeString(self.roleName.encode("utf-8") if sys.version_info[0] == 2 else self.roleName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.roleName is None: + raise TProtocolException(message="Required field roleName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", 
".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPrincipalsInRoleResponse: + """ + Attributes: + - principalGrants + + """ + + def __init__( + self, + principalGrants=None, + ): + self.principalGrants = principalGrants + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.principalGrants = [] + (_etype137, _size134) = iprot.readListBegin() + for _i138 in range(_size134): + _elem139 = RolePrincipalGrant() + _elem139.read(iprot) + self.principalGrants.append(_elem139) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPrincipalsInRoleResponse") + if self.principalGrants is not None: + oprot.writeFieldBegin("principalGrants", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.principalGrants)) + for iter140 in self.principalGrants: + iter140.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.principalGrants is None: + raise TProtocolException(message="Required field principalGrants is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GrantRevokeRoleRequest: + """ + Attributes: + - requestType + - roleName + - principalName + - principalType + - grantor + - grantorType + - grantOption + + """ + + def __init__( + self, + requestType=None, + roleName=None, + principalName=None, + principalType=None, + grantor=None, + grantorType=None, + grantOption=None, + ): + self.requestType = requestType + self.roleName = roleName + self.principalName = principalName + self.principalType = principalType + self.grantor = grantor + self.grantorType = grantorType + self.grantOption = grantOption + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.requestType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.roleName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.principalName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else 
iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.principalType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.grantor = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: + self.grantorType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.grantOption = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GrantRevokeRoleRequest") + if self.requestType is not None: + oprot.writeFieldBegin("requestType", TType.I32, 1) + oprot.writeI32(self.requestType) + oprot.writeFieldEnd() + if self.roleName is not None: + oprot.writeFieldBegin("roleName", TType.STRING, 2) + oprot.writeString(self.roleName.encode("utf-8") if sys.version_info[0] == 2 else self.roleName) + oprot.writeFieldEnd() + if self.principalName is not None: + oprot.writeFieldBegin("principalName", TType.STRING, 3) + oprot.writeString(self.principalName.encode("utf-8") if sys.version_info[0] == 2 else self.principalName) + oprot.writeFieldEnd() + if self.principalType is not None: + oprot.writeFieldBegin("principalType", TType.I32, 4) + oprot.writeI32(self.principalType) + oprot.writeFieldEnd() + if self.grantor is not None: + oprot.writeFieldBegin("grantor", TType.STRING, 5) + oprot.writeString(self.grantor.encode("utf-8") if sys.version_info[0] == 2 else self.grantor) + oprot.writeFieldEnd() + if self.grantorType is not None: + oprot.writeFieldBegin("grantorType", TType.I32, 6) + oprot.writeI32(self.grantorType) + oprot.writeFieldEnd() + if self.grantOption is not None: + oprot.writeFieldBegin("grantOption", TType.BOOL, 7) + oprot.writeBool(self.grantOption) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GrantRevokeRoleResponse: + """ + Attributes: + - success + + """ + + def __init__( + self, + success=None, + ): + self.success = success + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.BOOL: + self.success = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GrantRevokeRoleResponse") + if self.success is not None: + oprot.writeFieldBegin("success", TType.BOOL, 
1) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Catalog: + """ + Attributes: + - name + - description + - locationUri + - createTime + + """ + + def __init__( + self, + name=None, + description=None, + locationUri=None, + createTime=None, + ): + self.name = name + self.description = description + self.locationUri = locationUri + self.createTime = createTime + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.description = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.locationUri = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.createTime = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Catalog") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.description is not None: + oprot.writeFieldBegin("description", TType.STRING, 2) + oprot.writeString(self.description.encode("utf-8") if sys.version_info[0] == 2 else self.description) + oprot.writeFieldEnd() + if self.locationUri is not None: + oprot.writeFieldBegin("locationUri", TType.STRING, 3) + oprot.writeString(self.locationUri.encode("utf-8") if sys.version_info[0] == 2 else self.locationUri) + oprot.writeFieldEnd() + if self.createTime is not None: + oprot.writeFieldBegin("createTime", TType.I32, 4) + oprot.writeI32(self.createTime) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CreateCatalogRequest: + """ + Attributes: + - catalog + + """ + + def __init__( + self, + catalog=None, + ): + self.catalog = catalog + + def read(self, iprot): + if ( + 
iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.catalog = Catalog() + self.catalog.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CreateCatalogRequest") + if self.catalog is not None: + oprot.writeFieldBegin("catalog", TType.STRUCT, 1) + self.catalog.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AlterCatalogRequest: + """ + Attributes: + - name + - newCat + + """ + + def __init__( + self, + name=None, + newCat=None, + ): + self.name = name + self.newCat = newCat + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.newCat = Catalog() + self.newCat.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AlterCatalogRequest") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.newCat is not None: + oprot.writeFieldBegin("newCat", TType.STRUCT, 2) + self.newCat.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetCatalogRequest: + """ + Attributes: + - name + + """ + + def __init__( + self, + name=None, + ): + self.name = name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + 
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetCatalogRequest") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetCatalogResponse: + """ + Attributes: + - catalog + + """ + + def __init__( + self, + catalog=None, + ): + self.catalog = catalog + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.catalog = Catalog() + self.catalog.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetCatalogResponse") + if self.catalog is not None: + oprot.writeFieldBegin("catalog", TType.STRUCT, 1) + self.catalog.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetCatalogsResponse: + """ + Attributes: + - names + + """ + + def __init__( + self, + names=None, + ): + self.names = names + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.names = [] + (_etype144, _size141) = iprot.readListBegin() + for _i145 in range(_size141): + _elem146 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + 
else iprot.readString() + ) + self.names.append(_elem146) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetCatalogsResponse") + if self.names is not None: + oprot.writeFieldBegin("names", TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.names)) + for iter147 in self.names: + oprot.writeString(iter147.encode("utf-8") if sys.version_info[0] == 2 else iter147) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class DropCatalogRequest: + """ + Attributes: + - name + + """ + + def __init__( + self, + name=None, + ): + self.name = name + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("DropCatalogRequest") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Database: + """ + Attributes: + - name + - description + - locationUri + - parameters + - privileges + - ownerName + - ownerType + - catalogName + - createTime + - managedLocationUri + - type + - connector_name + - remote_dbname + + """ + + def __init__( + self, + name=None, + description=None, + locationUri=None, + parameters=None, + privileges=None, + ownerName=None, + ownerType=None, + catalogName=None, + createTime=None, + managedLocationUri=None, + type=None, + connector_name=None, + remote_dbname=None, + ): + self.name = name + self.description = description + self.locationUri = locationUri + self.parameters = parameters + self.privileges = privileges + self.ownerName = ownerName + self.ownerType = ownerType + self.catalogName = catalogName + self.createTime = createTime + 
self.managedLocationUri = managedLocationUri + self.type = type + self.connector_name = connector_name + self.remote_dbname = remote_dbname + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.description = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.locationUri = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.MAP: + self.parameters = {} + (_ktype149, _vtype150, _size148) = iprot.readMapBegin() + for _i152 in range(_size148): + _key153 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val154 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.parameters[_key153] = _val154 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.privileges = PrincipalPrivilegeSet() + self.privileges.read(iprot) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.ownerName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.ownerType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.catalogName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.I32: + self.createTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.managedLocationUri = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.I32: + self.type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 12: + if ftype == TType.STRING: + self.connector_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 13: + if ftype == TType.STRING: + self.remote_dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Database") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if 
sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.description is not None: + oprot.writeFieldBegin("description", TType.STRING, 2) + oprot.writeString(self.description.encode("utf-8") if sys.version_info[0] == 2 else self.description) + oprot.writeFieldEnd() + if self.locationUri is not None: + oprot.writeFieldBegin("locationUri", TType.STRING, 3) + oprot.writeString(self.locationUri.encode("utf-8") if sys.version_info[0] == 2 else self.locationUri) + oprot.writeFieldEnd() + if self.parameters is not None: + oprot.writeFieldBegin("parameters", TType.MAP, 4) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) + for kiter155, viter156 in self.parameters.items(): + oprot.writeString(kiter155.encode("utf-8") if sys.version_info[0] == 2 else kiter155) + oprot.writeString(viter156.encode("utf-8") if sys.version_info[0] == 2 else viter156) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.privileges is not None: + oprot.writeFieldBegin("privileges", TType.STRUCT, 5) + self.privileges.write(oprot) + oprot.writeFieldEnd() + if self.ownerName is not None: + oprot.writeFieldBegin("ownerName", TType.STRING, 6) + oprot.writeString(self.ownerName.encode("utf-8") if sys.version_info[0] == 2 else self.ownerName) + oprot.writeFieldEnd() + if self.ownerType is not None: + oprot.writeFieldBegin("ownerType", TType.I32, 7) + oprot.writeI32(self.ownerType) + oprot.writeFieldEnd() + if self.catalogName is not None: + oprot.writeFieldBegin("catalogName", TType.STRING, 8) + oprot.writeString(self.catalogName.encode("utf-8") if sys.version_info[0] == 2 else self.catalogName) + oprot.writeFieldEnd() + if self.createTime is not None: + oprot.writeFieldBegin("createTime", TType.I32, 9) + oprot.writeI32(self.createTime) + oprot.writeFieldEnd() + if self.managedLocationUri is not None: + oprot.writeFieldBegin("managedLocationUri", TType.STRING, 10) + oprot.writeString(self.managedLocationUri.encode("utf-8") if sys.version_info[0] == 2 else self.managedLocationUri) + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.I32, 11) + oprot.writeI32(self.type) + oprot.writeFieldEnd() + if self.connector_name is not None: + oprot.writeFieldBegin("connector_name", TType.STRING, 12) + oprot.writeString(self.connector_name.encode("utf-8") if sys.version_info[0] == 2 else self.connector_name) + oprot.writeFieldEnd() + if self.remote_dbname is not None: + oprot.writeFieldBegin("remote_dbname", TType.STRING, 13) + oprot.writeString(self.remote_dbname.encode("utf-8") if sys.version_info[0] == 2 else self.remote_dbname) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SerDeInfo: + """ + Attributes: + - name + - serializationLib + - parameters + - description + - serializerClass + - deserializerClass + - serdeType + + """ + + def __init__( + self, + name=None, + serializationLib=None, + parameters=None, + description=None, + serializerClass=None, + deserializerClass=None, + serdeType=None, + ): + self.name = name + self.serializationLib = serializationLib + self.parameters = parameters + self.description = description + self.serializerClass = serializerClass 
+ self.deserializerClass = deserializerClass + self.serdeType = serdeType + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.serializationLib = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.MAP: + self.parameters = {} + (_ktype158, _vtype159, _size157) = iprot.readMapBegin() + for _i161 in range(_size157): + _key162 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val163 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.parameters[_key162] = _val163 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.description = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.serializerClass = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.deserializerClass = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.serdeType = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SerDeInfo") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.serializationLib is not None: + oprot.writeFieldBegin("serializationLib", TType.STRING, 2) + oprot.writeString(self.serializationLib.encode("utf-8") if sys.version_info[0] == 2 else self.serializationLib) + oprot.writeFieldEnd() + if self.parameters is not None: + oprot.writeFieldBegin("parameters", TType.MAP, 3) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) + for kiter164, viter165 in self.parameters.items(): + oprot.writeString(kiter164.encode("utf-8") if sys.version_info[0] == 2 else kiter164) + oprot.writeString(viter165.encode("utf-8") if sys.version_info[0] == 2 else viter165) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.description is not None: + oprot.writeFieldBegin("description", TType.STRING, 4) + oprot.writeString(self.description.encode("utf-8") if sys.version_info[0] == 2 else self.description) + oprot.writeFieldEnd() + if self.serializerClass is not None: + oprot.writeFieldBegin("serializerClass", 
TType.STRING, 5) + oprot.writeString(self.serializerClass.encode("utf-8") if sys.version_info[0] == 2 else self.serializerClass) + oprot.writeFieldEnd() + if self.deserializerClass is not None: + oprot.writeFieldBegin("deserializerClass", TType.STRING, 6) + oprot.writeString(self.deserializerClass.encode("utf-8") if sys.version_info[0] == 2 else self.deserializerClass) + oprot.writeFieldEnd() + if self.serdeType is not None: + oprot.writeFieldBegin("serdeType", TType.I32, 7) + oprot.writeI32(self.serdeType) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Order: + """ + Attributes: + - col + - order + + """ + + def __init__( + self, + col=None, + order=None, + ): + self.col = col + self.order = order + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.col = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.order = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Order") + if self.col is not None: + oprot.writeFieldBegin("col", TType.STRING, 1) + oprot.writeString(self.col.encode("utf-8") if sys.version_info[0] == 2 else self.col) + oprot.writeFieldEnd() + if self.order is not None: + oprot.writeFieldBegin("order", TType.I32, 2) + oprot.writeI32(self.order) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SkewedInfo: + """ + Attributes: + - skewedColNames + - skewedColValues + - skewedColValueLocationMaps + + """ + + def __init__( + self, + skewedColNames=None, + skewedColValues=None, + skewedColValueLocationMaps=None, + ): + self.skewedColNames = skewedColNames + self.skewedColValues = skewedColValues + self.skewedColValueLocationMaps = skewedColValueLocationMaps + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if 
fid == 1: + if ftype == TType.LIST: + self.skewedColNames = [] + (_etype169, _size166) = iprot.readListBegin() + for _i170 in range(_size166): + _elem171 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.skewedColNames.append(_elem171) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.skewedColValues = [] + (_etype175, _size172) = iprot.readListBegin() + for _i176 in range(_size172): + _elem177 = [] + (_etype181, _size178) = iprot.readListBegin() + for _i182 in range(_size178): + _elem183 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _elem177.append(_elem183) + iprot.readListEnd() + self.skewedColValues.append(_elem177) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.MAP: + self.skewedColValueLocationMaps = {} + (_ktype185, _vtype186, _size184) = iprot.readMapBegin() + for _i188 in range(_size184): + _key189 = [] + (_etype194, _size191) = iprot.readListBegin() + for _i195 in range(_size191): + _elem196 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _key189.append(_elem196) + iprot.readListEnd() + _val190 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + # a Python list is unhashable, so key the location map on an immutable tuple + self.skewedColValueLocationMaps[tuple(_key189)] = _val190 + iprot.readMapEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SkewedInfo") + if self.skewedColNames is not None: + oprot.writeFieldBegin("skewedColNames", TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.skewedColNames)) + for iter197 in self.skewedColNames: + oprot.writeString(iter197.encode("utf-8") if sys.version_info[0] == 2 else iter197) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.skewedColValues is not None: + oprot.writeFieldBegin("skewedColValues", TType.LIST, 2) + oprot.writeListBegin(TType.LIST, len(self.skewedColValues)) + for iter198 in self.skewedColValues: + oprot.writeListBegin(TType.STRING, len(iter198)) + for iter199 in iter198: + oprot.writeString(iter199.encode("utf-8") if sys.version_info[0] == 2 else iter199) + oprot.writeListEnd() + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.skewedColValueLocationMaps is not None: + oprot.writeFieldBegin("skewedColValueLocationMaps", TType.MAP, 3) + oprot.writeMapBegin(TType.LIST, TType.STRING, len(self.skewedColValueLocationMaps)) + for kiter200, viter201 in self.skewedColValueLocationMaps.items(): + oprot.writeListBegin(TType.STRING, len(kiter200)) + for iter202 in kiter200: + oprot.writeString(iter202.encode("utf-8") if sys.version_info[0] == 2 else iter202) + oprot.writeListEnd() + oprot.writeString(viter201.encode("utf-8") if sys.version_info[0] == 2 else viter201) + oprot.writeMapEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == 
other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class StorageDescriptor: + """ + Attributes: + - cols + - location + - inputFormat + - outputFormat + - compressed + - numBuckets + - serdeInfo + - bucketCols + - sortCols + - parameters + - skewedInfo + - storedAsSubDirectories + + """ + + def __init__( + self, + cols=None, + location=None, + inputFormat=None, + outputFormat=None, + compressed=None, + numBuckets=None, + serdeInfo=None, + bucketCols=None, + sortCols=None, + parameters=None, + skewedInfo=None, + storedAsSubDirectories=None, + ): + self.cols = cols + self.location = location + self.inputFormat = inputFormat + self.outputFormat = outputFormat + self.compressed = compressed + self.numBuckets = numBuckets + self.serdeInfo = serdeInfo + self.bucketCols = bucketCols + self.sortCols = sortCols + self.parameters = parameters + self.skewedInfo = skewedInfo + self.storedAsSubDirectories = storedAsSubDirectories + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.cols = [] + (_etype206, _size203) = iprot.readListBegin() + for _i207 in range(_size203): + _elem208 = FieldSchema() + _elem208.read(iprot) + self.cols.append(_elem208) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.location = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.inputFormat = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.outputFormat = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.BOOL: + self.compressed = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: + self.numBuckets = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRUCT: + self.serdeInfo = SerDeInfo() + self.serdeInfo.read(iprot) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.LIST: + self.bucketCols = [] + (_etype212, _size209) = iprot.readListBegin() + for _i213 in range(_size209): + _elem214 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.bucketCols.append(_elem214) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.LIST: + self.sortCols = [] + (_etype218, _size215) = iprot.readListBegin() + for _i219 in range(_size215): + _elem220 = Order() + _elem220.read(iprot) + self.sortCols.append(_elem220) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.MAP: + self.parameters = {} + (_ktype222, _vtype223, _size221) = iprot.readMapBegin() + for _i225 in range(_size221): + _key226 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val227 = ( + iprot.readString().decode("utf-8", errors="replace") + if 
sys.version_info[0] == 2 + else iprot.readString() + ) + self.parameters[_key226] = _val227 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.STRUCT: + self.skewedInfo = SkewedInfo() + self.skewedInfo.read(iprot) + else: + iprot.skip(ftype) + elif fid == 12: + if ftype == TType.BOOL: + self.storedAsSubDirectories = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("StorageDescriptor") + if self.cols is not None: + oprot.writeFieldBegin("cols", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.cols)) + for iter228 in self.cols: + iter228.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.location is not None: + oprot.writeFieldBegin("location", TType.STRING, 2) + oprot.writeString(self.location.encode("utf-8") if sys.version_info[0] == 2 else self.location) + oprot.writeFieldEnd() + if self.inputFormat is not None: + oprot.writeFieldBegin("inputFormat", TType.STRING, 3) + oprot.writeString(self.inputFormat.encode("utf-8") if sys.version_info[0] == 2 else self.inputFormat) + oprot.writeFieldEnd() + if self.outputFormat is not None: + oprot.writeFieldBegin("outputFormat", TType.STRING, 4) + oprot.writeString(self.outputFormat.encode("utf-8") if sys.version_info[0] == 2 else self.outputFormat) + oprot.writeFieldEnd() + if self.compressed is not None: + oprot.writeFieldBegin("compressed", TType.BOOL, 5) + oprot.writeBool(self.compressed) + oprot.writeFieldEnd() + if self.numBuckets is not None: + oprot.writeFieldBegin("numBuckets", TType.I32, 6) + oprot.writeI32(self.numBuckets) + oprot.writeFieldEnd() + if self.serdeInfo is not None: + oprot.writeFieldBegin("serdeInfo", TType.STRUCT, 7) + self.serdeInfo.write(oprot) + oprot.writeFieldEnd() + if self.bucketCols is not None: + oprot.writeFieldBegin("bucketCols", TType.LIST, 8) + oprot.writeListBegin(TType.STRING, len(self.bucketCols)) + for iter229 in self.bucketCols: + oprot.writeString(iter229.encode("utf-8") if sys.version_info[0] == 2 else iter229) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.sortCols is not None: + oprot.writeFieldBegin("sortCols", TType.LIST, 9) + oprot.writeListBegin(TType.STRUCT, len(self.sortCols)) + for iter230 in self.sortCols: + iter230.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.parameters is not None: + oprot.writeFieldBegin("parameters", TType.MAP, 10) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) + for kiter231, viter232 in self.parameters.items(): + oprot.writeString(kiter231.encode("utf-8") if sys.version_info[0] == 2 else kiter231) + oprot.writeString(viter232.encode("utf-8") if sys.version_info[0] == 2 else viter232) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.skewedInfo is not None: + oprot.writeFieldBegin("skewedInfo", TType.STRUCT, 11) + self.skewedInfo.write(oprot) + oprot.writeFieldEnd() + if self.storedAsSubDirectories is not None: + oprot.writeFieldBegin("storedAsSubDirectories", TType.BOOL, 12) + oprot.writeBool(self.storedAsSubDirectories) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return 
"{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CreationMetadata: + """ + Attributes: + - catName + - dbName + - tblName + - tablesUsed + - validTxnList + - materializationTime + - sourceTables + + """ + + def __init__( + self, + catName=None, + dbName=None, + tblName=None, + tablesUsed=None, + validTxnList=None, + materializationTime=None, + sourceTables=None, + ): + self.catName = catName + self.dbName = dbName + self.tblName = tblName + self.tablesUsed = tablesUsed + self.validTxnList = validTxnList + self.materializationTime = materializationTime + self.sourceTables = sourceTables + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.SET: + self.tablesUsed = set() + (_etype236, _size233) = iprot.readSetBegin() + for _i237 in range(_size233): + _elem238 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.tablesUsed.add(_elem238) + iprot.readSetEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validTxnList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I64: + self.materializationTime = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.LIST: + self.sourceTables = [] + (_etype242, _size239) = iprot.readListBegin() + for _i243 in range(_size239): + _elem244 = SourceTable() + _elem244.read(iprot) + self.sourceTables.append(_elem244) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CreationMetadata") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 3) + 
oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.tablesUsed is not None: + oprot.writeFieldBegin("tablesUsed", TType.SET, 4) + oprot.writeSetBegin(TType.STRING, len(self.tablesUsed)) + for iter245 in self.tablesUsed: + oprot.writeString(iter245.encode("utf-8") if sys.version_info[0] == 2 else iter245) + oprot.writeSetEnd() + oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin("validTxnList", TType.STRING, 5) + oprot.writeString(self.validTxnList.encode("utf-8") if sys.version_info[0] == 2 else self.validTxnList) + oprot.writeFieldEnd() + if self.materializationTime is not None: + oprot.writeFieldBegin("materializationTime", TType.I64, 6) + oprot.writeI64(self.materializationTime) + oprot.writeFieldEnd() + if self.sourceTables is not None: + oprot.writeFieldBegin("sourceTables", TType.LIST, 7) + oprot.writeListBegin(TType.STRUCT, len(self.sourceTables)) + for iter246 in self.sourceTables: + iter246.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.catName is None: + raise TProtocolException(message="Required field catName is unset!") + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + if self.tablesUsed is None: + raise TProtocolException(message="Required field tablesUsed is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class BooleanColumnStatsData: + """ + Attributes: + - numTrues + - numFalses + - numNulls + - bitVectors + + """ + + def __init__( + self, + numTrues=None, + numFalses=None, + numNulls=None, + bitVectors=None, + ): + self.numTrues = numTrues + self.numFalses = numFalses + self.numNulls = numNulls + self.bitVectors = bitVectors + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.numTrues = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.numFalses = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.numNulls = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.bitVectors = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("BooleanColumnStatsData") + if self.numTrues is not None: + oprot.writeFieldBegin("numTrues", TType.I64, 1) + oprot.writeI64(self.numTrues) + oprot.writeFieldEnd() + if self.numFalses is not None: + oprot.writeFieldBegin("numFalses", TType.I64, 2) + 
oprot.writeI64(self.numFalses) + oprot.writeFieldEnd() + if self.numNulls is not None: + oprot.writeFieldBegin("numNulls", TType.I64, 3) + oprot.writeI64(self.numNulls) + oprot.writeFieldEnd() + if self.bitVectors is not None: + oprot.writeFieldBegin("bitVectors", TType.STRING, 4) + oprot.writeBinary(self.bitVectors) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.numTrues is None: + raise TProtocolException(message="Required field numTrues is unset!") + if self.numFalses is None: + raise TProtocolException(message="Required field numFalses is unset!") + if self.numNulls is None: + raise TProtocolException(message="Required field numNulls is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class DoubleColumnStatsData: + """ + Attributes: + - lowValue + - highValue + - numNulls + - numDVs + - bitVectors + + """ + + def __init__( + self, + lowValue=None, + highValue=None, + numNulls=None, + numDVs=None, + bitVectors=None, + ): + self.lowValue = lowValue + self.highValue = highValue + self.numNulls = numNulls + self.numDVs = numDVs + self.bitVectors = bitVectors + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.DOUBLE: + self.lowValue = iprot.readDouble() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.DOUBLE: + self.highValue = iprot.readDouble() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.numNulls = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.numDVs = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.bitVectors = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("DoubleColumnStatsData") + if self.lowValue is not None: + oprot.writeFieldBegin("lowValue", TType.DOUBLE, 1) + oprot.writeDouble(self.lowValue) + oprot.writeFieldEnd() + if self.highValue is not None: + oprot.writeFieldBegin("highValue", TType.DOUBLE, 2) + oprot.writeDouble(self.highValue) + oprot.writeFieldEnd() + if self.numNulls is not None: + oprot.writeFieldBegin("numNulls", TType.I64, 3) + oprot.writeI64(self.numNulls) + oprot.writeFieldEnd() + if self.numDVs is not None: + oprot.writeFieldBegin("numDVs", TType.I64, 4) + oprot.writeI64(self.numDVs) + oprot.writeFieldEnd() + if self.bitVectors is not None: + oprot.writeFieldBegin("bitVectors", TType.STRING, 5) + oprot.writeBinary(self.bitVectors) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.numNulls is None: + raise TProtocolException(message="Required field numNulls is unset!") + if 
self.numDVs is None: + raise TProtocolException(message="Required field numDVs is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class LongColumnStatsData: + """ + Attributes: + - lowValue + - highValue + - numNulls + - numDVs + - bitVectors + + """ + + def __init__( + self, + lowValue=None, + highValue=None, + numNulls=None, + numDVs=None, + bitVectors=None, + ): + self.lowValue = lowValue + self.highValue = highValue + self.numNulls = numNulls + self.numDVs = numDVs + self.bitVectors = bitVectors + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.lowValue = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.highValue = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.numNulls = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.numDVs = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.bitVectors = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("LongColumnStatsData") + if self.lowValue is not None: + oprot.writeFieldBegin("lowValue", TType.I64, 1) + oprot.writeI64(self.lowValue) + oprot.writeFieldEnd() + if self.highValue is not None: + oprot.writeFieldBegin("highValue", TType.I64, 2) + oprot.writeI64(self.highValue) + oprot.writeFieldEnd() + if self.numNulls is not None: + oprot.writeFieldBegin("numNulls", TType.I64, 3) + oprot.writeI64(self.numNulls) + oprot.writeFieldEnd() + if self.numDVs is not None: + oprot.writeFieldBegin("numDVs", TType.I64, 4) + oprot.writeI64(self.numDVs) + oprot.writeFieldEnd() + if self.bitVectors is not None: + oprot.writeFieldBegin("bitVectors", TType.STRING, 5) + oprot.writeBinary(self.bitVectors) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.numNulls is None: + raise TProtocolException(message="Required field numNulls is unset!") + if self.numDVs is None: + raise TProtocolException(message="Required field numDVs is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class StringColumnStatsData: + """ + Attributes: + - maxColLen + - avgColLen + - numNulls + - numDVs + - bitVectors + + """ + + def __init__( + self, + maxColLen=None, + avgColLen=None, + numNulls=None, + numDVs=None, + 
bitVectors=None, + ): + self.maxColLen = maxColLen + self.avgColLen = avgColLen + self.numNulls = numNulls + self.numDVs = numDVs + self.bitVectors = bitVectors + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.maxColLen = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.DOUBLE: + self.avgColLen = iprot.readDouble() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.numNulls = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.numDVs = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.bitVectors = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("StringColumnStatsData") + if self.maxColLen is not None: + oprot.writeFieldBegin("maxColLen", TType.I64, 1) + oprot.writeI64(self.maxColLen) + oprot.writeFieldEnd() + if self.avgColLen is not None: + oprot.writeFieldBegin("avgColLen", TType.DOUBLE, 2) + oprot.writeDouble(self.avgColLen) + oprot.writeFieldEnd() + if self.numNulls is not None: + oprot.writeFieldBegin("numNulls", TType.I64, 3) + oprot.writeI64(self.numNulls) + oprot.writeFieldEnd() + if self.numDVs is not None: + oprot.writeFieldBegin("numDVs", TType.I64, 4) + oprot.writeI64(self.numDVs) + oprot.writeFieldEnd() + if self.bitVectors is not None: + oprot.writeFieldBegin("bitVectors", TType.STRING, 5) + oprot.writeBinary(self.bitVectors) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.maxColLen is None: + raise TProtocolException(message="Required field maxColLen is unset!") + if self.avgColLen is None: + raise TProtocolException(message="Required field avgColLen is unset!") + if self.numNulls is None: + raise TProtocolException(message="Required field numNulls is unset!") + if self.numDVs is None: + raise TProtocolException(message="Required field numDVs is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class BinaryColumnStatsData: + """ + Attributes: + - maxColLen + - avgColLen + - numNulls + - bitVectors + + """ + + def __init__( + self, + maxColLen=None, + avgColLen=None, + numNulls=None, + bitVectors=None, + ): + self.maxColLen = maxColLen + self.avgColLen = avgColLen + self.numNulls = numNulls + self.bitVectors = bitVectors + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = 
iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.maxColLen = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.DOUBLE: + self.avgColLen = iprot.readDouble() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.numNulls = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.bitVectors = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("BinaryColumnStatsData") + if self.maxColLen is not None: + oprot.writeFieldBegin("maxColLen", TType.I64, 1) + oprot.writeI64(self.maxColLen) + oprot.writeFieldEnd() + if self.avgColLen is not None: + oprot.writeFieldBegin("avgColLen", TType.DOUBLE, 2) + oprot.writeDouble(self.avgColLen) + oprot.writeFieldEnd() + if self.numNulls is not None: + oprot.writeFieldBegin("numNulls", TType.I64, 3) + oprot.writeI64(self.numNulls) + oprot.writeFieldEnd() + if self.bitVectors is not None: + oprot.writeFieldBegin("bitVectors", TType.STRING, 4) + oprot.writeBinary(self.bitVectors) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.maxColLen is None: + raise TProtocolException(message="Required field maxColLen is unset!") + if self.avgColLen is None: + raise TProtocolException(message="Required field avgColLen is unset!") + if self.numNulls is None: + raise TProtocolException(message="Required field numNulls is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Decimal: + """ + Attributes: + - scale + - unscaled + + """ + + def __init__( + self, + scale=None, + unscaled=None, + ): + self.scale = scale + self.unscaled = unscaled + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 3: + if ftype == TType.I16: + self.scale = iprot.readI16() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRING: + self.unscaled = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Decimal") + if self.unscaled is not None: + oprot.writeFieldBegin("unscaled", TType.STRING, 1) + oprot.writeBinary(self.unscaled) + oprot.writeFieldEnd() + if self.scale is not None: + oprot.writeFieldBegin("scale", TType.I16, 3) + oprot.writeI16(self.scale) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.scale is None: + raise 
TProtocolException(message="Required field scale is unset!") + if self.unscaled is None: + raise TProtocolException(message="Required field unscaled is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class DecimalColumnStatsData: + """ + Attributes: + - lowValue + - highValue + - numNulls + - numDVs + - bitVectors + + """ + + def __init__( + self, + lowValue=None, + highValue=None, + numNulls=None, + numDVs=None, + bitVectors=None, + ): + self.lowValue = lowValue + self.highValue = highValue + self.numNulls = numNulls + self.numDVs = numDVs + self.bitVectors = bitVectors + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.lowValue = Decimal() + self.lowValue.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.highValue = Decimal() + self.highValue.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.numNulls = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.numDVs = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.bitVectors = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("DecimalColumnStatsData") + if self.lowValue is not None: + oprot.writeFieldBegin("lowValue", TType.STRUCT, 1) + self.lowValue.write(oprot) + oprot.writeFieldEnd() + if self.highValue is not None: + oprot.writeFieldBegin("highValue", TType.STRUCT, 2) + self.highValue.write(oprot) + oprot.writeFieldEnd() + if self.numNulls is not None: + oprot.writeFieldBegin("numNulls", TType.I64, 3) + oprot.writeI64(self.numNulls) + oprot.writeFieldEnd() + if self.numDVs is not None: + oprot.writeFieldBegin("numDVs", TType.I64, 4) + oprot.writeI64(self.numDVs) + oprot.writeFieldEnd() + if self.bitVectors is not None: + oprot.writeFieldBegin("bitVectors", TType.STRING, 5) + oprot.writeBinary(self.bitVectors) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.numNulls is None: + raise TProtocolException(message="Required field numNulls is unset!") + if self.numDVs is None: + raise TProtocolException(message="Required field numDVs is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Date: + """ + Attributes: + - daysSinceEpoch + + """ + + def __init__( + self, + 
daysSinceEpoch=None, + ): + self.daysSinceEpoch = daysSinceEpoch + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.daysSinceEpoch = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Date") + if self.daysSinceEpoch is not None: + oprot.writeFieldBegin("daysSinceEpoch", TType.I64, 1) + oprot.writeI64(self.daysSinceEpoch) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.daysSinceEpoch is None: + raise TProtocolException(message="Required field daysSinceEpoch is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class DateColumnStatsData: + """ + Attributes: + - lowValue + - highValue + - numNulls + - numDVs + - bitVectors + + """ + + def __init__( + self, + lowValue=None, + highValue=None, + numNulls=None, + numDVs=None, + bitVectors=None, + ): + self.lowValue = lowValue + self.highValue = highValue + self.numNulls = numNulls + self.numDVs = numDVs + self.bitVectors = bitVectors + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.lowValue = Date() + self.lowValue.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.highValue = Date() + self.highValue.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.numNulls = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.numDVs = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.bitVectors = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("DateColumnStatsData") + if self.lowValue is not None: + oprot.writeFieldBegin("lowValue", TType.STRUCT, 1) + self.lowValue.write(oprot) + oprot.writeFieldEnd() + if self.highValue is not None: + oprot.writeFieldBegin("highValue", TType.STRUCT, 2) + self.highValue.write(oprot) + oprot.writeFieldEnd() + if self.numNulls is not None: + oprot.writeFieldBegin("numNulls", TType.I64, 3) + oprot.writeI64(self.numNulls) + 
oprot.writeFieldEnd() + if self.numDVs is not None: + oprot.writeFieldBegin("numDVs", TType.I64, 4) + oprot.writeI64(self.numDVs) + oprot.writeFieldEnd() + if self.bitVectors is not None: + oprot.writeFieldBegin("bitVectors", TType.STRING, 5) + oprot.writeBinary(self.bitVectors) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.numNulls is None: + raise TProtocolException(message="Required field numNulls is unset!") + if self.numDVs is None: + raise TProtocolException(message="Required field numDVs is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Timestamp: + """ + Attributes: + - secondsSinceEpoch + + """ + + def __init__( + self, + secondsSinceEpoch=None, + ): + self.secondsSinceEpoch = secondsSinceEpoch + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.secondsSinceEpoch = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Timestamp") + if self.secondsSinceEpoch is not None: + oprot.writeFieldBegin("secondsSinceEpoch", TType.I64, 1) + oprot.writeI64(self.secondsSinceEpoch) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.secondsSinceEpoch is None: + raise TProtocolException(message="Required field secondsSinceEpoch is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class TimestampColumnStatsData: + """ + Attributes: + - lowValue + - highValue + - numNulls + - numDVs + - bitVectors + + """ + + def __init__( + self, + lowValue=None, + highValue=None, + numNulls=None, + numDVs=None, + bitVectors=None, + ): + self.lowValue = lowValue + self.highValue = highValue + self.numNulls = numNulls + self.numDVs = numDVs + self.bitVectors = bitVectors + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.lowValue = Timestamp() + self.lowValue.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.highValue = Timestamp() + self.highValue.read(iprot) + 
else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.numNulls = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.numDVs = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.bitVectors = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("TimestampColumnStatsData") + if self.lowValue is not None: + oprot.writeFieldBegin("lowValue", TType.STRUCT, 1) + self.lowValue.write(oprot) + oprot.writeFieldEnd() + if self.highValue is not None: + oprot.writeFieldBegin("highValue", TType.STRUCT, 2) + self.highValue.write(oprot) + oprot.writeFieldEnd() + if self.numNulls is not None: + oprot.writeFieldBegin("numNulls", TType.I64, 3) + oprot.writeI64(self.numNulls) + oprot.writeFieldEnd() + if self.numDVs is not None: + oprot.writeFieldBegin("numDVs", TType.I64, 4) + oprot.writeI64(self.numDVs) + oprot.writeFieldEnd() + if self.bitVectors is not None: + oprot.writeFieldBegin("bitVectors", TType.STRING, 5) + oprot.writeBinary(self.bitVectors) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.numNulls is None: + raise TProtocolException(message="Required field numNulls is unset!") + if self.numDVs is None: + raise TProtocolException(message="Required field numDVs is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ColumnStatisticsData: + """ + Attributes: + - booleanStats + - longStats + - doubleStats + - stringStats + - binaryStats + - decimalStats + - dateStats + - timestampStats + + """ + + def __init__( + self, + booleanStats=None, + longStats=None, + doubleStats=None, + stringStats=None, + binaryStats=None, + decimalStats=None, + dateStats=None, + timestampStats=None, + ): + self.booleanStats = booleanStats + self.longStats = longStats + self.doubleStats = doubleStats + self.stringStats = stringStats + self.binaryStats = binaryStats + self.decimalStats = decimalStats + self.dateStats = dateStats + self.timestampStats = timestampStats + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.booleanStats = BooleanColumnStatsData() + self.booleanStats.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.longStats = LongColumnStatsData() + self.longStats.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.doubleStats = DoubleColumnStatsData() + self.doubleStats.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.stringStats = StringColumnStatsData() + self.stringStats.read(iprot) + 
else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.binaryStats = BinaryColumnStatsData() + self.binaryStats.read(iprot) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRUCT: + self.decimalStats = DecimalColumnStatsData() + self.decimalStats.read(iprot) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRUCT: + self.dateStats = DateColumnStatsData() + self.dateStats.read(iprot) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRUCT: + self.timestampStats = TimestampColumnStatsData() + self.timestampStats.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ColumnStatisticsData") + if self.booleanStats is not None: + oprot.writeFieldBegin("booleanStats", TType.STRUCT, 1) + self.booleanStats.write(oprot) + oprot.writeFieldEnd() + if self.longStats is not None: + oprot.writeFieldBegin("longStats", TType.STRUCT, 2) + self.longStats.write(oprot) + oprot.writeFieldEnd() + if self.doubleStats is not None: + oprot.writeFieldBegin("doubleStats", TType.STRUCT, 3) + self.doubleStats.write(oprot) + oprot.writeFieldEnd() + if self.stringStats is not None: + oprot.writeFieldBegin("stringStats", TType.STRUCT, 4) + self.stringStats.write(oprot) + oprot.writeFieldEnd() + if self.binaryStats is not None: + oprot.writeFieldBegin("binaryStats", TType.STRUCT, 5) + self.binaryStats.write(oprot) + oprot.writeFieldEnd() + if self.decimalStats is not None: + oprot.writeFieldBegin("decimalStats", TType.STRUCT, 6) + self.decimalStats.write(oprot) + oprot.writeFieldEnd() + if self.dateStats is not None: + oprot.writeFieldBegin("dateStats", TType.STRUCT, 7) + self.dateStats.write(oprot) + oprot.writeFieldEnd() + if self.timestampStats is not None: + oprot.writeFieldBegin("timestampStats", TType.STRUCT, 8) + self.timestampStats.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ColumnStatisticsObj: + """ + Attributes: + - colName + - colType + - statsData + + """ + + def __init__( + self, + colName=None, + colType=None, + statsData=None, + ): + self.colName = colName + self.colType = colType + self.statsData = statsData + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.colName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.colType = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + 
iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.statsData = ColumnStatisticsData() + self.statsData.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ColumnStatisticsObj") + if self.colName is not None: + oprot.writeFieldBegin("colName", TType.STRING, 1) + oprot.writeString(self.colName.encode("utf-8") if sys.version_info[0] == 2 else self.colName) + oprot.writeFieldEnd() + if self.colType is not None: + oprot.writeFieldBegin("colType", TType.STRING, 2) + oprot.writeString(self.colType.encode("utf-8") if sys.version_info[0] == 2 else self.colType) + oprot.writeFieldEnd() + if self.statsData is not None: + oprot.writeFieldBegin("statsData", TType.STRUCT, 3) + self.statsData.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.colName is None: + raise TProtocolException(message="Required field colName is unset!") + if self.colType is None: + raise TProtocolException(message="Required field colType is unset!") + if self.statsData is None: + raise TProtocolException(message="Required field statsData is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ColumnStatisticsDesc: + """ + Attributes: + - isTblLevel + - dbName + - tableName + - partName + - lastAnalyzed + - catName + + """ + + def __init__( + self, + isTblLevel=None, + dbName=None, + tableName=None, + partName=None, + lastAnalyzed=None, + catName=None, + ): + self.isTblLevel = isTblLevel + self.dbName = dbName + self.tableName = tableName + self.partName = partName + self.lastAnalyzed = lastAnalyzed + self.catName = catName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.BOOL: + self.isTblLevel = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.partName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.lastAnalyzed = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + 
else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ColumnStatisticsDesc") + if self.isTblLevel is not None: + oprot.writeFieldBegin("isTblLevel", TType.BOOL, 1) + oprot.writeBool(self.isTblLevel) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 3) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.partName is not None: + oprot.writeFieldBegin("partName", TType.STRING, 4) + oprot.writeString(self.partName.encode("utf-8") if sys.version_info[0] == 2 else self.partName) + oprot.writeFieldEnd() + if self.lastAnalyzed is not None: + oprot.writeFieldBegin("lastAnalyzed", TType.I64, 5) + oprot.writeI64(self.lastAnalyzed) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 6) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.isTblLevel is None: + raise TProtocolException(message="Required field isTblLevel is unset!") + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tableName is None: + raise TProtocolException(message="Required field tableName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ColumnStatistics: + """ + Attributes: + - statsDesc + - statsObj + - isStatsCompliant + - engine + + """ + + def __init__( + self, + statsDesc=None, + statsObj=None, + isStatsCompliant=None, + engine=None, + ): + self.statsDesc = statsDesc + self.statsObj = statsObj + self.isStatsCompliant = isStatsCompliant + self.engine = engine + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.statsDesc = ColumnStatisticsDesc() + self.statsDesc.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.statsObj = [] + (_etype250, _size247) = iprot.readListBegin() + for _i251 in range(_size247): + _elem252 = ColumnStatisticsObj() + _elem252.read(iprot) + self.statsObj.append(_elem252) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.engine = ( + iprot.readString().decode("utf-8", errors="replace") if 
sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ColumnStatistics") + if self.statsDesc is not None: + oprot.writeFieldBegin("statsDesc", TType.STRUCT, 1) + self.statsDesc.write(oprot) + oprot.writeFieldEnd() + if self.statsObj is not None: + oprot.writeFieldBegin("statsObj", TType.LIST, 2) + oprot.writeListBegin(TType.STRUCT, len(self.statsObj)) + for iter253 in self.statsObj: + iter253.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin("isStatsCompliant", TType.BOOL, 3) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin("engine", TType.STRING, 4) + oprot.writeString(self.engine.encode("utf-8") if sys.version_info[0] == 2 else self.engine) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.statsDesc is None: + raise TProtocolException(message="Required field statsDesc is unset!") + if self.statsObj is None: + raise TProtocolException(message="Required field statsObj is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class FileMetadata: + """ + Attributes: + - type + - version + - data + + """ + + def __init__( + self, + type=1, + version=1, + data=None, + ): + self.type = type + self.version = version + self.data = data + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.BYTE: + self.type = iprot.readByte() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BYTE: + self.version = iprot.readByte() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.data = [] + (_etype257, _size254) = iprot.readListBegin() + for _i258 in range(_size254): + _elem259 = iprot.readBinary() + self.data.append(_elem259) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("FileMetadata") + if self.type is not None: + oprot.writeFieldBegin("type", TType.BYTE, 1) + oprot.writeByte(self.type) + oprot.writeFieldEnd() + if self.version is not None: + oprot.writeFieldBegin("version", TType.BYTE, 2) + oprot.writeByte(self.version) + oprot.writeFieldEnd() + if self.data is not None: + oprot.writeFieldBegin("data", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.data)) + for iter260 in self.data: + oprot.writeBinary(iter260) + 
oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ObjectDictionary: + """ + Attributes: + - values + + """ + + def __init__( + self, + values=None, + ): + self.values = values + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.MAP: + self.values = {} + (_ktype262, _vtype263, _size261) = iprot.readMapBegin() + for _i265 in range(_size261): + _key266 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val267 = [] + (_etype271, _size268) = iprot.readListBegin() + for _i272 in range(_size268): + _elem273 = iprot.readBinary() + _val267.append(_elem273) + iprot.readListEnd() + self.values[_key266] = _val267 + iprot.readMapEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ObjectDictionary") + if self.values is not None: + oprot.writeFieldBegin("values", TType.MAP, 1) + oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.values)) + for kiter274, viter275 in self.values.items(): + oprot.writeString(kiter274.encode("utf-8") if sys.version_info[0] == 2 else kiter274) + oprot.writeListBegin(TType.STRING, len(viter275)) + for iter276 in viter275: + oprot.writeBinary(iter276) + oprot.writeListEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.values is None: + raise TProtocolException(message="Required field values is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Table: + """ + Attributes: + - tableName + - dbName + - owner + - createTime + - lastAccessTime + - retention + - sd + - partitionKeys + - parameters + - viewOriginalText + - viewExpandedText + - tableType + - privileges + - temporary + - rewriteEnabled + - creationMetadata + - catName + - ownerType + - writeId + - isStatsCompliant + - colStats + - accessType + - requiredReadCapabilities + - requiredWriteCapabilities + - id + - fileMetadata + - dictionary + - txnId + + """ + + def __init__( + self, + tableName=None, + dbName=None, + owner=None, + createTime=None, + lastAccessTime=None, + retention=None, + sd=None, + partitionKeys=None, + parameters=None, + viewOriginalText=None, + viewExpandedText=None, + tableType=None, + privileges=None, + 
temporary=False, + rewriteEnabled=None, + creationMetadata=None, + catName=None, + ownerType=1, + writeId=-1, + isStatsCompliant=None, + colStats=None, + accessType=None, + requiredReadCapabilities=None, + requiredWriteCapabilities=None, + id=None, + fileMetadata=None, + dictionary=None, + txnId=None, + ): + self.tableName = tableName + self.dbName = dbName + self.owner = owner + self.createTime = createTime + self.lastAccessTime = lastAccessTime + self.retention = retention + self.sd = sd + self.partitionKeys = partitionKeys + self.parameters = parameters + self.viewOriginalText = viewOriginalText + self.viewExpandedText = viewExpandedText + self.tableType = tableType + self.privileges = privileges + self.temporary = temporary + self.rewriteEnabled = rewriteEnabled + self.creationMetadata = creationMetadata + self.catName = catName + self.ownerType = ownerType + self.writeId = writeId + self.isStatsCompliant = isStatsCompliant + self.colStats = colStats + self.accessType = accessType + self.requiredReadCapabilities = requiredReadCapabilities + self.requiredWriteCapabilities = requiredWriteCapabilities + self.id = id + self.fileMetadata = fileMetadata + self.dictionary = dictionary + self.txnId = txnId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.owner = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.createTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.lastAccessTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: + self.retention = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRUCT: + self.sd = StorageDescriptor() + self.sd.read(iprot) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.LIST: + self.partitionKeys = [] + (_etype280, _size277) = iprot.readListBegin() + for _i281 in range(_size277): + _elem282 = FieldSchema() + _elem282.read(iprot) + self.partitionKeys.append(_elem282) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.MAP: + self.parameters = {} + (_ktype284, _vtype285, _size283) = iprot.readMapBegin() + for _i287 in range(_size283): + _key288 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val289 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.parameters[_key288] = _val289 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.viewOriginalText = ( + iprot.readString().decode("utf-8", 
errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.STRING: + self.viewExpandedText = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 12: + if ftype == TType.STRING: + self.tableType = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 13: + if ftype == TType.STRUCT: + self.privileges = PrincipalPrivilegeSet() + self.privileges.read(iprot) + else: + iprot.skip(ftype) + elif fid == 14: + if ftype == TType.BOOL: + self.temporary = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 15: + if ftype == TType.BOOL: + self.rewriteEnabled = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 16: + if ftype == TType.STRUCT: + self.creationMetadata = CreationMetadata() + self.creationMetadata.read(iprot) + else: + iprot.skip(ftype) + elif fid == 17: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 18: + if ftype == TType.I32: + self.ownerType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 19: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 20: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 21: + if ftype == TType.STRUCT: + self.colStats = ColumnStatistics() + self.colStats.read(iprot) + else: + iprot.skip(ftype) + elif fid == 22: + if ftype == TType.BYTE: + self.accessType = iprot.readByte() + else: + iprot.skip(ftype) + elif fid == 23: + if ftype == TType.LIST: + self.requiredReadCapabilities = [] + (_etype293, _size290) = iprot.readListBegin() + for _i294 in range(_size290): + _elem295 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.requiredReadCapabilities.append(_elem295) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 24: + if ftype == TType.LIST: + self.requiredWriteCapabilities = [] + (_etype299, _size296) = iprot.readListBegin() + for _i300 in range(_size296): + _elem301 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.requiredWriteCapabilities.append(_elem301) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 25: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 26: + if ftype == TType.STRUCT: + self.fileMetadata = FileMetadata() + self.fileMetadata.read(iprot) + else: + iprot.skip(ftype) + elif fid == 27: + if ftype == TType.STRUCT: + self.dictionary = ObjectDictionary() + self.dictionary.read(iprot) + else: + iprot.skip(ftype) + elif fid == 28: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Table") + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 1) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 
else self.tableName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.owner is not None: + oprot.writeFieldBegin("owner", TType.STRING, 3) + oprot.writeString(self.owner.encode("utf-8") if sys.version_info[0] == 2 else self.owner) + oprot.writeFieldEnd() + if self.createTime is not None: + oprot.writeFieldBegin("createTime", TType.I32, 4) + oprot.writeI32(self.createTime) + oprot.writeFieldEnd() + if self.lastAccessTime is not None: + oprot.writeFieldBegin("lastAccessTime", TType.I32, 5) + oprot.writeI32(self.lastAccessTime) + oprot.writeFieldEnd() + if self.retention is not None: + oprot.writeFieldBegin("retention", TType.I32, 6) + oprot.writeI32(self.retention) + oprot.writeFieldEnd() + if self.sd is not None: + oprot.writeFieldBegin("sd", TType.STRUCT, 7) + self.sd.write(oprot) + oprot.writeFieldEnd() + if self.partitionKeys is not None: + oprot.writeFieldBegin("partitionKeys", TType.LIST, 8) + oprot.writeListBegin(TType.STRUCT, len(self.partitionKeys)) + for iter302 in self.partitionKeys: + iter302.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.parameters is not None: + oprot.writeFieldBegin("parameters", TType.MAP, 9) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) + for kiter303, viter304 in self.parameters.items(): + oprot.writeString(kiter303.encode("utf-8") if sys.version_info[0] == 2 else kiter303) + oprot.writeString(viter304.encode("utf-8") if sys.version_info[0] == 2 else viter304) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.viewOriginalText is not None: + oprot.writeFieldBegin("viewOriginalText", TType.STRING, 10) + oprot.writeString(self.viewOriginalText.encode("utf-8") if sys.version_info[0] == 2 else self.viewOriginalText) + oprot.writeFieldEnd() + if self.viewExpandedText is not None: + oprot.writeFieldBegin("viewExpandedText", TType.STRING, 11) + oprot.writeString(self.viewExpandedText.encode("utf-8") if sys.version_info[0] == 2 else self.viewExpandedText) + oprot.writeFieldEnd() + if self.tableType is not None: + oprot.writeFieldBegin("tableType", TType.STRING, 12) + oprot.writeString(self.tableType.encode("utf-8") if sys.version_info[0] == 2 else self.tableType) + oprot.writeFieldEnd() + if self.privileges is not None: + oprot.writeFieldBegin("privileges", TType.STRUCT, 13) + self.privileges.write(oprot) + oprot.writeFieldEnd() + if self.temporary is not None: + oprot.writeFieldBegin("temporary", TType.BOOL, 14) + oprot.writeBool(self.temporary) + oprot.writeFieldEnd() + if self.rewriteEnabled is not None: + oprot.writeFieldBegin("rewriteEnabled", TType.BOOL, 15) + oprot.writeBool(self.rewriteEnabled) + oprot.writeFieldEnd() + if self.creationMetadata is not None: + oprot.writeFieldBegin("creationMetadata", TType.STRUCT, 16) + self.creationMetadata.write(oprot) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 17) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.ownerType is not None: + oprot.writeFieldBegin("ownerType", TType.I32, 18) + oprot.writeI32(self.ownerType) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin("writeId", TType.I64, 19) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + 
oprot.writeFieldBegin("isStatsCompliant", TType.BOOL, 20) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() + if self.colStats is not None: + oprot.writeFieldBegin("colStats", TType.STRUCT, 21) + self.colStats.write(oprot) + oprot.writeFieldEnd() + if self.accessType is not None: + oprot.writeFieldBegin("accessType", TType.BYTE, 22) + oprot.writeByte(self.accessType) + oprot.writeFieldEnd() + if self.requiredReadCapabilities is not None: + oprot.writeFieldBegin("requiredReadCapabilities", TType.LIST, 23) + oprot.writeListBegin(TType.STRING, len(self.requiredReadCapabilities)) + for iter305 in self.requiredReadCapabilities: + oprot.writeString(iter305.encode("utf-8") if sys.version_info[0] == 2 else iter305) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.requiredWriteCapabilities is not None: + oprot.writeFieldBegin("requiredWriteCapabilities", TType.LIST, 24) + oprot.writeListBegin(TType.STRING, len(self.requiredWriteCapabilities)) + for iter306 in self.requiredWriteCapabilities: + oprot.writeString(iter306.encode("utf-8") if sys.version_info[0] == 2 else iter306) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 25) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + if self.fileMetadata is not None: + oprot.writeFieldBegin("fileMetadata", TType.STRUCT, 26) + self.fileMetadata.write(oprot) + oprot.writeFieldEnd() + if self.dictionary is not None: + oprot.writeFieldBegin("dictionary", TType.STRUCT, 27) + self.dictionary.write(oprot) + oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin("txnId", TType.I64, 28) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SourceTable: + """ + Attributes: + - table + - insertedCount + - updatedCount + - deletedCount + + """ + + def __init__( + self, + table=None, + insertedCount=None, + updatedCount=None, + deletedCount=None, + ): + self.table = table + self.insertedCount = insertedCount + self.updatedCount = updatedCount + self.deletedCount = deletedCount + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.table = Table() + self.table.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.insertedCount = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.updatedCount = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.deletedCount = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
oprot.writeStructBegin("SourceTable") + if self.table is not None: + oprot.writeFieldBegin("table", TType.STRUCT, 1) + self.table.write(oprot) + oprot.writeFieldEnd() + if self.insertedCount is not None: + oprot.writeFieldBegin("insertedCount", TType.I64, 2) + oprot.writeI64(self.insertedCount) + oprot.writeFieldEnd() + if self.updatedCount is not None: + oprot.writeFieldBegin("updatedCount", TType.I64, 3) + oprot.writeI64(self.updatedCount) + oprot.writeFieldEnd() + if self.deletedCount is not None: + oprot.writeFieldBegin("deletedCount", TType.I64, 4) + oprot.writeI64(self.deletedCount) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.table is None: + raise TProtocolException(message="Required field table is unset!") + if self.insertedCount is None: + raise TProtocolException(message="Required field insertedCount is unset!") + if self.updatedCount is None: + raise TProtocolException(message="Required field updatedCount is unset!") + if self.deletedCount is None: + raise TProtocolException(message="Required field deletedCount is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Partition: + """ + Attributes: + - values + - dbName + - tableName + - createTime + - lastAccessTime + - sd + - parameters + - privileges + - catName + - writeId + - isStatsCompliant + - colStats + - fileMetadata + + """ + + def __init__( + self, + values=None, + dbName=None, + tableName=None, + createTime=None, + lastAccessTime=None, + sd=None, + parameters=None, + privileges=None, + catName=None, + writeId=-1, + isStatsCompliant=None, + colStats=None, + fileMetadata=None, + ): + self.values = values + self.dbName = dbName + self.tableName = tableName + self.createTime = createTime + self.lastAccessTime = lastAccessTime + self.sd = sd + self.parameters = parameters + self.privileges = privileges + self.catName = catName + self.writeId = writeId + self.isStatsCompliant = isStatsCompliant + self.colStats = colStats + self.fileMetadata = fileMetadata + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.values = [] + (_etype310, _size307) = iprot.readListBegin() + for _i311 in range(_size307): + _elem312 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.values.append(_elem312) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.createTime = iprot.readI32() + else: + 
iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.lastAccessTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRUCT: + self.sd = StorageDescriptor() + self.sd.read(iprot) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.MAP: + self.parameters = {} + (_ktype314, _vtype315, _size313) = iprot.readMapBegin() + for _i317 in range(_size313): + _key318 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val319 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.parameters[_key318] = _val319 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRUCT: + self.privileges = PrincipalPrivilegeSet() + self.privileges.read(iprot) + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 12: + if ftype == TType.STRUCT: + self.colStats = ColumnStatistics() + self.colStats.read(iprot) + else: + iprot.skip(ftype) + elif fid == 13: + if ftype == TType.STRUCT: + self.fileMetadata = FileMetadata() + self.fileMetadata.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Partition") + if self.values is not None: + oprot.writeFieldBegin("values", TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.values)) + for iter320 in self.values: + oprot.writeString(iter320.encode("utf-8") if sys.version_info[0] == 2 else iter320) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 3) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.createTime is not None: + oprot.writeFieldBegin("createTime", TType.I32, 4) + oprot.writeI32(self.createTime) + oprot.writeFieldEnd() + if self.lastAccessTime is not None: + oprot.writeFieldBegin("lastAccessTime", TType.I32, 5) + oprot.writeI32(self.lastAccessTime) + oprot.writeFieldEnd() + if self.sd is not None: + oprot.writeFieldBegin("sd", TType.STRUCT, 6) + self.sd.write(oprot) + oprot.writeFieldEnd() + if self.parameters is not None: + oprot.writeFieldBegin("parameters", TType.MAP, 7) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) + for kiter321, viter322 in self.parameters.items(): + oprot.writeString(kiter321.encode("utf-8") if sys.version_info[0] == 2 else kiter321) + oprot.writeString(viter322.encode("utf-8") if sys.version_info[0] == 2 else viter322) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.privileges is not None: + oprot.writeFieldBegin("privileges", TType.STRUCT, 
8) + self.privileges.write(oprot) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 9) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin("writeId", TType.I64, 10) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin("isStatsCompliant", TType.BOOL, 11) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() + if self.colStats is not None: + oprot.writeFieldBegin("colStats", TType.STRUCT, 12) + self.colStats.write(oprot) + oprot.writeFieldEnd() + if self.fileMetadata is not None: + oprot.writeFieldBegin("fileMetadata", TType.STRUCT, 13) + self.fileMetadata.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionWithoutSD: + """ + Attributes: + - values + - createTime + - lastAccessTime + - relativePath + - parameters + - privileges + + """ + + def __init__( + self, + values=None, + createTime=None, + lastAccessTime=None, + relativePath=None, + parameters=None, + privileges=None, + ): + self.values = values + self.createTime = createTime + self.lastAccessTime = lastAccessTime + self.relativePath = relativePath + self.parameters = parameters + self.privileges = privileges + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.values = [] + (_etype326, _size323) = iprot.readListBegin() + for _i327 in range(_size323): + _elem328 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.values.append(_elem328) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.createTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.lastAccessTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.relativePath = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.MAP: + self.parameters = {} + (_ktype330, _vtype331, _size329) = iprot.readMapBegin() + for _i333 in range(_size329): + _key334 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val335 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.parameters[_key334] = _val335 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRUCT: + self.privileges = PrincipalPrivilegeSet() + self.privileges.read(iprot) + else: + iprot.skip(ftype) + else: + 
iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionWithoutSD") + if self.values is not None: + oprot.writeFieldBegin("values", TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.values)) + for iter336 in self.values: + oprot.writeString(iter336.encode("utf-8") if sys.version_info[0] == 2 else iter336) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.createTime is not None: + oprot.writeFieldBegin("createTime", TType.I32, 2) + oprot.writeI32(self.createTime) + oprot.writeFieldEnd() + if self.lastAccessTime is not None: + oprot.writeFieldBegin("lastAccessTime", TType.I32, 3) + oprot.writeI32(self.lastAccessTime) + oprot.writeFieldEnd() + if self.relativePath is not None: + oprot.writeFieldBegin("relativePath", TType.STRING, 4) + oprot.writeString(self.relativePath.encode("utf-8") if sys.version_info[0] == 2 else self.relativePath) + oprot.writeFieldEnd() + if self.parameters is not None: + oprot.writeFieldBegin("parameters", TType.MAP, 5) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) + for kiter337, viter338 in self.parameters.items(): + oprot.writeString(kiter337.encode("utf-8") if sys.version_info[0] == 2 else kiter337) + oprot.writeString(viter338.encode("utf-8") if sys.version_info[0] == 2 else viter338) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.privileges is not None: + oprot.writeFieldBegin("privileges", TType.STRUCT, 6) + self.privileges.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionSpecWithSharedSD: + """ + Attributes: + - partitions + - sd + + """ + + def __init__( + self, + partitions=None, + sd=None, + ): + self.partitions = partitions + self.sd = sd + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.partitions = [] + (_etype342, _size339) = iprot.readListBegin() + for _i343 in range(_size339): + _elem344 = PartitionWithoutSD() + _elem344.read(iprot) + self.partitions.append(_elem344) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.sd = StorageDescriptor() + self.sd.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionSpecWithSharedSD") + if self.partitions is not None: + oprot.writeFieldBegin("partitions", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.partitions)) + 
for iter345 in self.partitions: + iter345.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.sd is not None: + oprot.writeFieldBegin("sd", TType.STRUCT, 2) + self.sd.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionListComposingSpec: + """ + Attributes: + - partitions + + """ + + def __init__( + self, + partitions=None, + ): + self.partitions = partitions + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.partitions = [] + (_etype349, _size346) = iprot.readListBegin() + for _i350 in range(_size346): + _elem351 = Partition() + _elem351.read(iprot) + self.partitions.append(_elem351) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionListComposingSpec") + if self.partitions is not None: + oprot.writeFieldBegin("partitions", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.partitions)) + for iter352 in self.partitions: + iter352.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionSpec: + """ + Attributes: + - dbName + - tableName + - rootPath + - sharedSDPartitionSpec + - partitionList + - catName + - writeId + - isStatsCompliant + + """ + + def __init__( + self, + dbName=None, + tableName=None, + rootPath=None, + sharedSDPartitionSpec=None, + partitionList=None, + catName=None, + writeId=-1, + isStatsCompliant=None, + ): + self.dbName = dbName + self.tableName = tableName + self.rootPath = rootPath + self.sharedSDPartitionSpec = sharedSDPartitionSpec + self.partitionList = partitionList + self.catName = catName + self.writeId = writeId + self.isStatsCompliant = isStatsCompliant + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if 
sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.rootPath = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.sharedSDPartitionSpec = PartitionSpecWithSharedSD() + self.sharedSDPartitionSpec.read(iprot) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.partitionList = PartitionListComposingSpec() + self.partitionList.read(iprot) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionSpec") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 2) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.rootPath is not None: + oprot.writeFieldBegin("rootPath", TType.STRING, 3) + oprot.writeString(self.rootPath.encode("utf-8") if sys.version_info[0] == 2 else self.rootPath) + oprot.writeFieldEnd() + if self.sharedSDPartitionSpec is not None: + oprot.writeFieldBegin("sharedSDPartitionSpec", TType.STRUCT, 4) + self.sharedSDPartitionSpec.write(oprot) + oprot.writeFieldEnd() + if self.partitionList is not None: + oprot.writeFieldBegin("partitionList", TType.STRUCT, 5) + self.partitionList.write(oprot) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 6) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin("writeId", TType.I64, 7) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin("isStatsCompliant", TType.BOOL, 8) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AggrStats: + """ + Attributes: + - colStats + - partsFound + - isStatsCompliant + + """ + + def __init__( + self, + colStats=None, + 
partsFound=None, + isStatsCompliant=None, + ): + self.colStats = colStats + self.partsFound = partsFound + self.isStatsCompliant = isStatsCompliant + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.colStats = [] + (_etype356, _size353) = iprot.readListBegin() + for _i357 in range(_size353): + _elem358 = ColumnStatisticsObj() + _elem358.read(iprot) + self.colStats.append(_elem358) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.partsFound = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AggrStats") + if self.colStats is not None: + oprot.writeFieldBegin("colStats", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.colStats)) + for iter359 in self.colStats: + iter359.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.partsFound is not None: + oprot.writeFieldBegin("partsFound", TType.I64, 2) + oprot.writeI64(self.partsFound) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin("isStatsCompliant", TType.BOOL, 3) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.colStats is None: + raise TProtocolException(message="Required field colStats is unset!") + if self.partsFound is None: + raise TProtocolException(message="Required field partsFound is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SetPartitionsStatsRequest: + """ + Attributes: + - colStats + - needMerge + - writeId + - validWriteIdList + - engine + + """ + + def __init__( + self, + colStats=None, + needMerge=None, + writeId=-1, + validWriteIdList=None, + engine=None, + ): + self.colStats = colStats + self.needMerge = needMerge + self.writeId = writeId + self.validWriteIdList = validWriteIdList + self.engine = engine + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.colStats = [] + (_etype363, _size360) = iprot.readListBegin() + for _i364 in range(_size360): + _elem365 = ColumnStatistics() + _elem365.read(iprot) + self.colStats.append(_elem365) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 
2: + if ftype == TType.BOOL: + self.needMerge = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.engine = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SetPartitionsStatsRequest") + if self.colStats is not None: + oprot.writeFieldBegin("colStats", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.colStats)) + for iter366 in self.colStats: + iter366.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.needMerge is not None: + oprot.writeFieldBegin("needMerge", TType.BOOL, 2) + oprot.writeBool(self.needMerge) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin("writeId", TType.I64, 3) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 4) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin("engine", TType.STRING, 5) + oprot.writeString(self.engine.encode("utf-8") if sys.version_info[0] == 2 else self.engine) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.colStats is None: + raise TProtocolException(message="Required field colStats is unset!") + if self.engine is None: + raise TProtocolException(message="Required field engine is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SetPartitionsStatsResponse: + """ + Attributes: + - result + + """ + + def __init__( + self, + result=None, + ): + self.result = result + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.BOOL: + self.result = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SetPartitionsStatsResponse") + if self.result is not None: + oprot.writeFieldBegin("result", TType.BOOL, 1) + oprot.writeBool(self.result) + 
oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.result is None: + raise TProtocolException(message="Required field result is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Schema: + """ + Attributes: + - fieldSchemas + - properties + + """ + + def __init__( + self, + fieldSchemas=None, + properties=None, + ): + self.fieldSchemas = fieldSchemas + self.properties = properties + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fieldSchemas = [] + (_etype370, _size367) = iprot.readListBegin() + for _i371 in range(_size367): + _elem372 = FieldSchema() + _elem372.read(iprot) + self.fieldSchemas.append(_elem372) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.MAP: + self.properties = {} + (_ktype374, _vtype375, _size373) = iprot.readMapBegin() + for _i377 in range(_size373): + _key378 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val379 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.properties[_key378] = _val379 + iprot.readMapEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Schema") + if self.fieldSchemas is not None: + oprot.writeFieldBegin("fieldSchemas", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.fieldSchemas)) + for iter380 in self.fieldSchemas: + iter380.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.properties is not None: + oprot.writeFieldBegin("properties", TType.MAP, 2) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) + for kiter381, viter382 in self.properties.items(): + oprot.writeString(kiter381.encode("utf-8") if sys.version_info[0] == 2 else kiter381) + oprot.writeString(viter382.encode("utf-8") if sys.version_info[0] == 2 else viter382) + oprot.writeMapEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PrimaryKeysRequest: + """ + Attributes: + - db_name + - tbl_name + - catName + - validWriteIdList + - tableId + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + catName=None, + validWriteIdList=None, + tableId=-1, + ): + 
self.db_name = db_name + self.tbl_name = tbl_name + self.catName = catName + self.validWriteIdList = validWriteIdList + self.tableId = tableId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.tableId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PrimaryKeysRequest") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 3) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 4) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.tableId is not None: + oprot.writeFieldBegin("tableId", TType.I64, 5) + oprot.writeI64(self.tableId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.db_name is None: + raise TProtocolException(message="Required field db_name is unset!") + if self.tbl_name is None: + raise TProtocolException(message="Required field tbl_name is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PrimaryKeysResponse: + """ + Attributes: + - primaryKeys + + """ + + def __init__( + self, + primaryKeys=None, + ): + self.primaryKeys = primaryKeys + + def read(self, iprot): + if ( + 
iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.primaryKeys = [] + (_etype386, _size383) = iprot.readListBegin() + for _i387 in range(_size383): + _elem388 = SQLPrimaryKey() + _elem388.read(iprot) + self.primaryKeys.append(_elem388) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PrimaryKeysResponse") + if self.primaryKeys is not None: + oprot.writeFieldBegin("primaryKeys", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) + for iter389 in self.primaryKeys: + iter389.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.primaryKeys is None: + raise TProtocolException(message="Required field primaryKeys is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ForeignKeysRequest: + """ + Attributes: + - parent_db_name + - parent_tbl_name + - foreign_db_name + - foreign_tbl_name + - catName + - validWriteIdList + - tableId + + """ + + def __init__( + self, + parent_db_name=None, + parent_tbl_name=None, + foreign_db_name=None, + foreign_tbl_name=None, + catName=None, + validWriteIdList=None, + tableId=-1, + ): + self.parent_db_name = parent_db_name + self.parent_tbl_name = parent_tbl_name + self.foreign_db_name = foreign_db_name + self.foreign_tbl_name = foreign_tbl_name + self.catName = catName + self.validWriteIdList = validWriteIdList + self.tableId = tableId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.parent_db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.parent_tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.foreign_db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.foreign_tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + 
if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I64: + self.tableId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ForeignKeysRequest") + if self.parent_db_name is not None: + oprot.writeFieldBegin("parent_db_name", TType.STRING, 1) + oprot.writeString(self.parent_db_name.encode("utf-8") if sys.version_info[0] == 2 else self.parent_db_name) + oprot.writeFieldEnd() + if self.parent_tbl_name is not None: + oprot.writeFieldBegin("parent_tbl_name", TType.STRING, 2) + oprot.writeString(self.parent_tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.parent_tbl_name) + oprot.writeFieldEnd() + if self.foreign_db_name is not None: + oprot.writeFieldBegin("foreign_db_name", TType.STRING, 3) + oprot.writeString(self.foreign_db_name.encode("utf-8") if sys.version_info[0] == 2 else self.foreign_db_name) + oprot.writeFieldEnd() + if self.foreign_tbl_name is not None: + oprot.writeFieldBegin("foreign_tbl_name", TType.STRING, 4) + oprot.writeString(self.foreign_tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.foreign_tbl_name) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 5) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 6) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.tableId is not None: + oprot.writeFieldBegin("tableId", TType.I64, 7) + oprot.writeI64(self.tableId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ForeignKeysResponse: + """ + Attributes: + - foreignKeys + + """ + + def __init__( + self, + foreignKeys=None, + ): + self.foreignKeys = foreignKeys + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.foreignKeys = [] + (_etype393, _size390) = iprot.readListBegin() + for _i394 in range(_size390): + _elem395 = SQLForeignKey() + _elem395.read(iprot) + self.foreignKeys.append(_elem395) + iprot.readListEnd() + else: + iprot.skip(ftype) + 
else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ForeignKeysResponse") + if self.foreignKeys is not None: + oprot.writeFieldBegin("foreignKeys", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) + for iter396 in self.foreignKeys: + iter396.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.foreignKeys is None: + raise TProtocolException(message="Required field foreignKeys is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class UniqueConstraintsRequest: + """ + Attributes: + - catName + - db_name + - tbl_name + - validWriteIdList + - tableId + + """ + + def __init__( + self, + catName=None, + db_name=None, + tbl_name=None, + validWriteIdList=None, + tableId=-1, + ): + self.catName = catName + self.db_name = db_name + self.tbl_name = tbl_name + self.validWriteIdList = validWriteIdList + self.tableId = tableId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.tableId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("UniqueConstraintsRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 2) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + 
oprot.writeFieldBegin("tbl_name", TType.STRING, 3) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 4) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.tableId is not None: + oprot.writeFieldBegin("tableId", TType.I64, 5) + oprot.writeI64(self.tableId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.catName is None: + raise TProtocolException(message="Required field catName is unset!") + if self.db_name is None: + raise TProtocolException(message="Required field db_name is unset!") + if self.tbl_name is None: + raise TProtocolException(message="Required field tbl_name is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class UniqueConstraintsResponse: + """ + Attributes: + - uniqueConstraints + + """ + + def __init__( + self, + uniqueConstraints=None, + ): + self.uniqueConstraints = uniqueConstraints + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.uniqueConstraints = [] + (_etype400, _size397) = iprot.readListBegin() + for _i401 in range(_size397): + _elem402 = SQLUniqueConstraint() + _elem402.read(iprot) + self.uniqueConstraints.append(_elem402) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("UniqueConstraintsResponse") + if self.uniqueConstraints is not None: + oprot.writeFieldBegin("uniqueConstraints", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) + for iter403 in self.uniqueConstraints: + iter403.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.uniqueConstraints is None: + raise TProtocolException(message="Required field uniqueConstraints is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class NotNullConstraintsRequest: + """ + Attributes: + - catName + - db_name + - tbl_name + - validWriteIdList + - tableId + + """ + + def __init__( + self, + catName=None, + db_name=None, + tbl_name=None, + validWriteIdList=None, + tableId=-1, + ): + self.catName = catName + self.db_name = db_name + 
self.tbl_name = tbl_name + self.validWriteIdList = validWriteIdList + self.tableId = tableId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.tableId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("NotNullConstraintsRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 2) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 3) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 4) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.tableId is not None: + oprot.writeFieldBegin("tableId", TType.I64, 5) + oprot.writeI64(self.tableId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.catName is None: + raise TProtocolException(message="Required field catName is unset!") + if self.db_name is None: + raise TProtocolException(message="Required field db_name is unset!") + if self.tbl_name is None: + raise TProtocolException(message="Required field tbl_name is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class NotNullConstraintsResponse: + """ + Attributes: + - notNullConstraints + + """ + + def __init__( + self, + notNullConstraints=None, + ): + 
+        self.notNullConstraints = notNullConstraints
+
+    def read(self, iprot):
+        if (
+            iprot._fast_decode is not None
+            and isinstance(iprot.trans, TTransport.CReadableTransport)
+            and self.thrift_spec is not None
+        ):
+            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.LIST:
+                    self.notNullConstraints = []
+                    (_etype407, _size404) = iprot.readListBegin()
+                    for _i408 in range(_size404):
+                        _elem409 = SQLNotNullConstraint()
+                        _elem409.read(iprot)
+                        self.notNullConstraints.append(_elem409)
+                    iprot.readListEnd()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+            return
+        oprot.writeStructBegin("NotNullConstraintsResponse")
+        if self.notNullConstraints is not None:
+            oprot.writeFieldBegin("notNullConstraints", TType.LIST, 1)
+            oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints))
+            for iter410 in self.notNullConstraints:
+                iter410.write(oprot)
+            oprot.writeListEnd()
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.notNullConstraints is None:
+            raise TProtocolException(message="Required field notNullConstraints is unset!")
+        return
+
+    def __repr__(self):
+        L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()]
+        return "{}({})".format(self.__class__.__name__, ", ".join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class DefaultConstraintsRequest:
+    """
+    Attributes:
+     - catName
+     - db_name
+     - tbl_name
+     - validWriteIdList
+     - tableId
+
+    """
+
+    def __init__(
+        self,
+        catName=None,
+        db_name=None,
+        tbl_name=None,
+        validWriteIdList=None,
+        tableId=-1,
+    ):
+        self.catName = catName
+        self.db_name = db_name
+        self.tbl_name = tbl_name
+        self.validWriteIdList = validWriteIdList
+        self.tableId = tableId
+
+    def read(self, iprot):
+        if (
+            iprot._fast_decode is not None
+            and isinstance(iprot.trans, TTransport.CReadableTransport)
+            and self.thrift_spec is not None
+        ):
+            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.catName = (
+                        iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString()
+                    )
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRING:
+                    self.db_name = (
+                        iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString()
+                    )
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.STRING:
+                    self.tbl_name = (
+                        iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString()
+                    )
+                else:
+                    iprot.skip(ftype)
+            elif fid == 4:
+                if ftype == TType.STRING:
+                    self.validWriteIdList = (
+                        iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString()
+                    )
+                else:
+                    iprot.skip(ftype)
+            elif fid == 5:
+                if ftype == TType.I64:
+                    self.tableId = iprot.readI64()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+            return
+        oprot.writeStructBegin("DefaultConstraintsRequest")
+        if self.catName is not None:
+            oprot.writeFieldBegin("catName", TType.STRING, 1)
+            oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName)
+            oprot.writeFieldEnd()
+        if self.db_name is not None:
+            oprot.writeFieldBegin("db_name", TType.STRING, 2)
+            oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name)
+            oprot.writeFieldEnd()
+        if self.tbl_name is not None:
+            oprot.writeFieldBegin("tbl_name", TType.STRING, 3)
+            oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name)
+            oprot.writeFieldEnd()
+        if self.validWriteIdList is not None:
+            oprot.writeFieldBegin("validWriteIdList", TType.STRING, 4)
+            oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList)
+            oprot.writeFieldEnd()
+        if self.tableId is not None:
+            oprot.writeFieldBegin("tableId", TType.I64, 5)
+            oprot.writeI64(self.tableId)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.catName is None:
+            raise TProtocolException(message="Required field catName is unset!")
+        if self.db_name is None:
+            raise TProtocolException(message="Required field db_name is unset!")
+        if self.tbl_name is None:
+            raise TProtocolException(message="Required field tbl_name is unset!")
+        return
+
+    def __repr__(self):
+        L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()]
+        return "{}({})".format(self.__class__.__name__, ", ".join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class DefaultConstraintsResponse:
+    """
+    Attributes:
+     - defaultConstraints
+
+    """
+
+    def __init__(
+        self,
+        defaultConstraints=None,
+    ):
+        self.defaultConstraints = defaultConstraints
+
+    def read(self, iprot):
+        if (
+            iprot._fast_decode is not None
+            and isinstance(iprot.trans, TTransport.CReadableTransport)
+            and self.thrift_spec is not None
+        ):
+            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.LIST:
+                    self.defaultConstraints = []
+                    (_etype414, _size411) = iprot.readListBegin()
+                    for _i415 in range(_size411):
+                        _elem416 = SQLDefaultConstraint()
+                        _elem416.read(iprot)
+                        self.defaultConstraints.append(_elem416)
+                    iprot.readListEnd()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+            return
+        oprot.writeStructBegin("DefaultConstraintsResponse")
+        if self.defaultConstraints is not None:
+            oprot.writeFieldBegin("defaultConstraints", TType.LIST, 1)
+            oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints))
+            for iter417 in self.defaultConstraints:
+                iter417.write(oprot)
+            oprot.writeListEnd()
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.defaultConstraints is None:
+            raise TProtocolException(message="Required field defaultConstraints is unset!")
+        return
+
+    def __repr__(self):
+        L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()]
+        return "{}({})".format(self.__class__.__name__, ", ".join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class CheckConstraintsRequest:
+    """
+    Attributes:
+     - catName
+     - db_name
+     - tbl_name
+     - validWriteIdList
+     - tableId
+
+    """
+
+    def __init__(
+        self,
+        catName=None,
+        db_name=None,
+        tbl_name=None,
+        validWriteIdList=None,
+        tableId=-1,
+    ):
+        self.catName = catName
+        self.db_name = db_name
+        self.tbl_name = tbl_name
+        self.validWriteIdList = validWriteIdList
+        self.tableId = tableId
+
+    def read(self, iprot):
+        if (
+            iprot._fast_decode is not None
+            and isinstance(iprot.trans, TTransport.CReadableTransport)
+            and self.thrift_spec is not None
+        ):
+            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRING:
+                    self.catName = (
+                        iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString()
+                    )
+                else:
+                    iprot.skip(ftype)
+            elif fid == 2:
+                if ftype == TType.STRING:
+                    self.db_name = (
+                        iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString()
+                    )
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.STRING:
+                    self.tbl_name = (
+                        iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString()
+                    )
+                else:
+                    iprot.skip(ftype)
+            elif fid == 4:
+                if ftype == TType.STRING:
+                    self.validWriteIdList = (
+                        iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString()
+                    )
+                else:
+                    iprot.skip(ftype)
+            elif fid == 5:
+                if ftype == TType.I64:
+                    self.tableId = iprot.readI64()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+            return
+        oprot.writeStructBegin("CheckConstraintsRequest")
+        if self.catName is not None:
+            oprot.writeFieldBegin("catName", TType.STRING, 1)
+            oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName)
+            oprot.writeFieldEnd()
+        if self.db_name is not None:
+            oprot.writeFieldBegin("db_name", TType.STRING, 2)
+            oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name)
+            oprot.writeFieldEnd()
+        if self.tbl_name is not None:
+            oprot.writeFieldBegin("tbl_name", TType.STRING, 3)
+            oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name)
+            oprot.writeFieldEnd()
+        if self.validWriteIdList is not None:
+            oprot.writeFieldBegin("validWriteIdList", TType.STRING, 4)
+            oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList)
+            oprot.writeFieldEnd()
+        if self.tableId is not None:
+            oprot.writeFieldBegin("tableId", TType.I64, 5)
+            oprot.writeI64(self.tableId)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.catName is None:
raise TProtocolException(message="Required field catName is unset!") + if self.db_name is None: + raise TProtocolException(message="Required field db_name is unset!") + if self.tbl_name is None: + raise TProtocolException(message="Required field tbl_name is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CheckConstraintsResponse: + """ + Attributes: + - checkConstraints + + """ + + def __init__( + self, + checkConstraints=None, + ): + self.checkConstraints = checkConstraints + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.checkConstraints = [] + (_etype421, _size418) = iprot.readListBegin() + for _i422 in range(_size418): + _elem423 = SQLCheckConstraint() + _elem423.read(iprot) + self.checkConstraints.append(_elem423) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CheckConstraintsResponse") + if self.checkConstraints is not None: + oprot.writeFieldBegin("checkConstraints", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) + for iter424 in self.checkConstraints: + iter424.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.checkConstraints is None: + raise TProtocolException(message="Required field checkConstraints is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AllTableConstraintsRequest: + """ + Attributes: + - dbName + - tblName + - catName + - validWriteIdList + - tableId + + """ + + def __init__( + self, + dbName=None, + tblName=None, + catName=None, + validWriteIdList=None, + tableId=-1, + ): + self.dbName = dbName + self.tblName = tblName + self.catName = catName + self.validWriteIdList = validWriteIdList + self.tableId = tableId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 
+            elif fid == 2:
+                if ftype == TType.STRING:
+                    self.tblName = (
+                        iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString()
+                    )
+                else:
+                    iprot.skip(ftype)
+            elif fid == 3:
+                if ftype == TType.STRING:
+                    self.catName = (
+                        iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString()
+                    )
+                else:
+                    iprot.skip(ftype)
+            elif fid == 4:
+                if ftype == TType.STRING:
+                    self.validWriteIdList = (
+                        iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString()
+                    )
+                else:
+                    iprot.skip(ftype)
+            elif fid == 5:
+                if ftype == TType.I64:
+                    self.tableId = iprot.readI64()
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if oprot._fast_encode is not None and self.thrift_spec is not None:
+            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
+            return
+        oprot.writeStructBegin("AllTableConstraintsRequest")
+        if self.dbName is not None:
+            oprot.writeFieldBegin("dbName", TType.STRING, 1)
+            oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName)
+            oprot.writeFieldEnd()
+        if self.tblName is not None:
+            oprot.writeFieldBegin("tblName", TType.STRING, 2)
+            oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName)
+            oprot.writeFieldEnd()
+        if self.catName is not None:
+            oprot.writeFieldBegin("catName", TType.STRING, 3)
+            oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName)
+            oprot.writeFieldEnd()
+        if self.validWriteIdList is not None:
+            oprot.writeFieldBegin("validWriteIdList", TType.STRING, 4)
+            oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList)
+            oprot.writeFieldEnd()
+        if self.tableId is not None:
+            oprot.writeFieldBegin("tableId", TType.I64, 5)
+            oprot.writeI64(self.tableId)
+            oprot.writeFieldEnd()
+        oprot.writeFieldStop()
+        oprot.writeStructEnd()
+
+    def validate(self):
+        if self.dbName is None:
+            raise TProtocolException(message="Required field dbName is unset!")
+        if self.tblName is None:
+            raise TProtocolException(message="Required field tblName is unset!")
+        if self.catName is None:
+            raise TProtocolException(message="Required field catName is unset!")
+        return
+
+    def __repr__(self):
+        L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()]
+        return "{}({})".format(self.__class__.__name__, ", ".join(L))
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class AllTableConstraintsResponse:
+    """
+    Attributes:
+     - allTableConstraints
+
+    """
+
+    def __init__(
+        self,
+        allTableConstraints=None,
+    ):
+        self.allTableConstraints = allTableConstraints
+
+    def read(self, iprot):
+        if (
+            iprot._fast_decode is not None
+            and isinstance(iprot.trans, TTransport.CReadableTransport)
+            and self.thrift_spec is not None
+        ):
+            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
+            return
+        iprot.readStructBegin()
+        while True:
+            (fname, ftype, fid) = iprot.readFieldBegin()
+            if ftype == TType.STOP:
+                break
+            if fid == 1:
+                if ftype == TType.STRUCT:
+                    self.allTableConstraints = SQLAllTableConstraints()
+                    self.allTableConstraints.read(iprot)
+                else:
+                    iprot.skip(ftype)
+            else:
+                iprot.skip(ftype)
+            iprot.readFieldEnd()
+        iprot.readStructEnd()
+
+    def write(self, oprot):
+        if
oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AllTableConstraintsResponse") + if self.allTableConstraints is not None: + oprot.writeFieldBegin("allTableConstraints", TType.STRUCT, 1) + self.allTableConstraints.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.allTableConstraints is None: + raise TProtocolException(message="Required field allTableConstraints is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class DropConstraintRequest: + """ + Attributes: + - dbname + - tablename + - constraintname + - catName + + """ + + def __init__( + self, + dbname=None, + tablename=None, + constraintname=None, + catName=None, + ): + self.dbname = dbname + self.tablename = tablename + self.constraintname = constraintname + self.catName = catName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tablename = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.constraintname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("DropConstraintRequest") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tablename is not None: + oprot.writeFieldBegin("tablename", TType.STRING, 2) + oprot.writeString(self.tablename.encode("utf-8") if sys.version_info[0] == 2 else self.tablename) + oprot.writeFieldEnd() + if self.constraintname is not None: + oprot.writeFieldBegin("constraintname", TType.STRING, 3) + oprot.writeString(self.constraintname.encode("utf-8") if sys.version_info[0] == 2 else self.constraintname) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 4) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 
2 else self.catName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbname is None: + raise TProtocolException(message="Required field dbname is unset!") + if self.tablename is None: + raise TProtocolException(message="Required field tablename is unset!") + if self.constraintname is None: + raise TProtocolException(message="Required field constraintname is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AddPrimaryKeyRequest: + """ + Attributes: + - primaryKeyCols + + """ + + def __init__( + self, + primaryKeyCols=None, + ): + self.primaryKeyCols = primaryKeyCols + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.primaryKeyCols = [] + (_etype428, _size425) = iprot.readListBegin() + for _i429 in range(_size425): + _elem430 = SQLPrimaryKey() + _elem430.read(iprot) + self.primaryKeyCols.append(_elem430) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AddPrimaryKeyRequest") + if self.primaryKeyCols is not None: + oprot.writeFieldBegin("primaryKeyCols", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.primaryKeyCols)) + for iter431 in self.primaryKeyCols: + iter431.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.primaryKeyCols is None: + raise TProtocolException(message="Required field primaryKeyCols is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AddForeignKeyRequest: + """ + Attributes: + - foreignKeyCols + + """ + + def __init__( + self, + foreignKeyCols=None, + ): + self.foreignKeyCols = foreignKeyCols + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.foreignKeyCols = [] + (_etype435, _size432) = iprot.readListBegin() + for _i436 in range(_size432): + _elem437 = SQLForeignKey() + _elem437.read(iprot) + self.foreignKeyCols.append(_elem437) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + 
iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AddForeignKeyRequest") + if self.foreignKeyCols is not None: + oprot.writeFieldBegin("foreignKeyCols", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.foreignKeyCols)) + for iter438 in self.foreignKeyCols: + iter438.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.foreignKeyCols is None: + raise TProtocolException(message="Required field foreignKeyCols is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AddUniqueConstraintRequest: + """ + Attributes: + - uniqueConstraintCols + + """ + + def __init__( + self, + uniqueConstraintCols=None, + ): + self.uniqueConstraintCols = uniqueConstraintCols + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.uniqueConstraintCols = [] + (_etype442, _size439) = iprot.readListBegin() + for _i443 in range(_size439): + _elem444 = SQLUniqueConstraint() + _elem444.read(iprot) + self.uniqueConstraintCols.append(_elem444) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AddUniqueConstraintRequest") + if self.uniqueConstraintCols is not None: + oprot.writeFieldBegin("uniqueConstraintCols", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraintCols)) + for iter445 in self.uniqueConstraintCols: + iter445.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.uniqueConstraintCols is None: + raise TProtocolException(message="Required field uniqueConstraintCols is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AddNotNullConstraintRequest: + """ + Attributes: + - notNullConstraintCols + + """ + + def __init__( + self, + notNullConstraintCols=None, + ): + self.notNullConstraintCols = notNullConstraintCols + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, 
self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.notNullConstraintCols = [] + (_etype449, _size446) = iprot.readListBegin() + for _i450 in range(_size446): + _elem451 = SQLNotNullConstraint() + _elem451.read(iprot) + self.notNullConstraintCols.append(_elem451) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AddNotNullConstraintRequest") + if self.notNullConstraintCols is not None: + oprot.writeFieldBegin("notNullConstraintCols", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraintCols)) + for iter452 in self.notNullConstraintCols: + iter452.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.notNullConstraintCols is None: + raise TProtocolException(message="Required field notNullConstraintCols is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AddDefaultConstraintRequest: + """ + Attributes: + - defaultConstraintCols + + """ + + def __init__( + self, + defaultConstraintCols=None, + ): + self.defaultConstraintCols = defaultConstraintCols + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.defaultConstraintCols = [] + (_etype456, _size453) = iprot.readListBegin() + for _i457 in range(_size453): + _elem458 = SQLDefaultConstraint() + _elem458.read(iprot) + self.defaultConstraintCols.append(_elem458) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AddDefaultConstraintRequest") + if self.defaultConstraintCols is not None: + oprot.writeFieldBegin("defaultConstraintCols", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraintCols)) + for iter459 in self.defaultConstraintCols: + iter459.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.defaultConstraintCols is None: + raise TProtocolException(message="Required field defaultConstraintCols is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, 
self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AddCheckConstraintRequest: + """ + Attributes: + - checkConstraintCols + + """ + + def __init__( + self, + checkConstraintCols=None, + ): + self.checkConstraintCols = checkConstraintCols + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.checkConstraintCols = [] + (_etype463, _size460) = iprot.readListBegin() + for _i464 in range(_size460): + _elem465 = SQLCheckConstraint() + _elem465.read(iprot) + self.checkConstraintCols.append(_elem465) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AddCheckConstraintRequest") + if self.checkConstraintCols is not None: + oprot.writeFieldBegin("checkConstraintCols", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.checkConstraintCols)) + for iter466 in self.checkConstraintCols: + iter466.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.checkConstraintCols is None: + raise TProtocolException(message="Required field checkConstraintCols is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionsByExprResult: + """ + Attributes: + - partitions + - hasUnknownPartitions + + """ + + def __init__( + self, + partitions=None, + hasUnknownPartitions=None, + ): + self.partitions = partitions + self.hasUnknownPartitions = hasUnknownPartitions + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.partitions = [] + (_etype470, _size467) = iprot.readListBegin() + for _i471 in range(_size467): + _elem472 = Partition() + _elem472.read(iprot) + self.partitions.append(_elem472) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.hasUnknownPartitions = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionsByExprResult") + if self.partitions is not None: + oprot.writeFieldBegin("partitions", TType.LIST, 1) + 
oprot.writeListBegin(TType.STRUCT, len(self.partitions)) + for iter473 in self.partitions: + iter473.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.hasUnknownPartitions is not None: + oprot.writeFieldBegin("hasUnknownPartitions", TType.BOOL, 2) + oprot.writeBool(self.hasUnknownPartitions) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.partitions is None: + raise TProtocolException(message="Required field partitions is unset!") + if self.hasUnknownPartitions is None: + raise TProtocolException(message="Required field hasUnknownPartitions is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionsSpecByExprResult: + """ + Attributes: + - partitionsSpec + - hasUnknownPartitions + + """ + + def __init__( + self, + partitionsSpec=None, + hasUnknownPartitions=None, + ): + self.partitionsSpec = partitionsSpec + self.hasUnknownPartitions = hasUnknownPartitions + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.partitionsSpec = [] + (_etype477, _size474) = iprot.readListBegin() + for _i478 in range(_size474): + _elem479 = PartitionSpec() + _elem479.read(iprot) + self.partitionsSpec.append(_elem479) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.hasUnknownPartitions = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionsSpecByExprResult") + if self.partitionsSpec is not None: + oprot.writeFieldBegin("partitionsSpec", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.partitionsSpec)) + for iter480 in self.partitionsSpec: + iter480.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.hasUnknownPartitions is not None: + oprot.writeFieldBegin("hasUnknownPartitions", TType.BOOL, 2) + oprot.writeBool(self.hasUnknownPartitions) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.partitionsSpec is None: + raise TProtocolException(message="Required field partitionsSpec is unset!") + if self.hasUnknownPartitions is None: + raise TProtocolException(message="Required field hasUnknownPartitions is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionsByExprRequest: + """ + Attributes: + - dbName + - tblName + - expr + - 
defaultPartitionName + - maxParts + - catName + - order + - validWriteIdList + - id + + """ + + def __init__( + self, + dbName=None, + tblName=None, + expr=None, + defaultPartitionName=None, + maxParts=-1, + catName=None, + order=None, + validWriteIdList=None, + id=-1, + ): + self.dbName = dbName + self.tblName = tblName + self.expr = expr + self.defaultPartitionName = defaultPartitionName + self.maxParts = maxParts + self.catName = catName + self.order = order + self.validWriteIdList = validWriteIdList + self.id = id + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.expr = iprot.readBinary() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.defaultPartitionName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I16: + self.maxParts = iprot.readI16() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.order = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionsByExprRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 2) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.expr is not None: + oprot.writeFieldBegin("expr", TType.STRING, 3) + oprot.writeBinary(self.expr) + oprot.writeFieldEnd() + if self.defaultPartitionName is not None: + oprot.writeFieldBegin("defaultPartitionName", TType.STRING, 4) + oprot.writeString( + self.defaultPartitionName.encode("utf-8") if sys.version_info[0] == 2 else self.defaultPartitionName + ) + oprot.writeFieldEnd() + if 
self.maxParts is not None: + oprot.writeFieldBegin("maxParts", TType.I16, 5) + oprot.writeI16(self.maxParts) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 6) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.order is not None: + oprot.writeFieldBegin("order", TType.STRING, 7) + oprot.writeString(self.order.encode("utf-8") if sys.version_info[0] == 2 else self.order) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 8) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 9) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + if self.expr is None: + raise TProtocolException(message="Required field expr is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class TableStatsResult: + """ + Attributes: + - tableStats + - isStatsCompliant + + """ + + def __init__( + self, + tableStats=None, + isStatsCompliant=None, + ): + self.tableStats = tableStats + self.isStatsCompliant = isStatsCompliant + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.tableStats = [] + (_etype484, _size481) = iprot.readListBegin() + for _i485 in range(_size481): + _elem486 = ColumnStatisticsObj() + _elem486.read(iprot) + self.tableStats.append(_elem486) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("TableStatsResult") + if self.tableStats is not None: + oprot.writeFieldBegin("tableStats", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.tableStats)) + for iter487 in self.tableStats: + iter487.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin("isStatsCompliant", TType.BOOL, 2) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.tableStats is None: + raise TProtocolException(message="Required field tableStats is unset!") + return + + def __repr__(self): + L = 
["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionsStatsResult: + """ + Attributes: + - partStats + - isStatsCompliant + + """ + + def __init__( + self, + partStats=None, + isStatsCompliant=None, + ): + self.partStats = partStats + self.isStatsCompliant = isStatsCompliant + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.MAP: + self.partStats = {} + (_ktype489, _vtype490, _size488) = iprot.readMapBegin() + for _i492 in range(_size488): + _key493 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val494 = [] + (_etype498, _size495) = iprot.readListBegin() + for _i499 in range(_size495): + _elem500 = ColumnStatisticsObj() + _elem500.read(iprot) + _val494.append(_elem500) + iprot.readListEnd() + self.partStats[_key493] = _val494 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionsStatsResult") + if self.partStats is not None: + oprot.writeFieldBegin("partStats", TType.MAP, 1) + oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.partStats)) + for kiter501, viter502 in self.partStats.items(): + oprot.writeString(kiter501.encode("utf-8") if sys.version_info[0] == 2 else kiter501) + oprot.writeListBegin(TType.STRUCT, len(viter502)) + for iter503 in viter502: + iter503.write(oprot) + oprot.writeListEnd() + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin("isStatsCompliant", TType.BOOL, 2) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.partStats is None: + raise TProtocolException(message="Required field partStats is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class TableStatsRequest: + """ + Attributes: + - dbName + - tblName + - colNames + - catName + - validWriteIdList + - engine + - id + + """ + + def __init__( + self, + dbName=None, + tblName=None, + colNames=None, + catName=None, + validWriteIdList=None, + engine=None, + id=-1, + ): + self.dbName = dbName + self.tblName = tblName + self.colNames = colNames + self.catName = catName + self.validWriteIdList = validWriteIdList + self.engine = engine + self.id = id + + def read(self, iprot): + if 
( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.colNames = [] + (_etype507, _size504) = iprot.readListBegin() + for _i508 in range(_size504): + _elem509 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.colNames.append(_elem509) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.engine = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("TableStatsRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 2) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.colNames is not None: + oprot.writeFieldBegin("colNames", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.colNames)) + for iter510 in self.colNames: + oprot.writeString(iter510.encode("utf-8") if sys.version_info[0] == 2 else iter510) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 4) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 5) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin("engine", TType.STRING, 6) + oprot.writeString(self.engine.encode("utf-8") if sys.version_info[0] == 2 else self.engine) + oprot.writeFieldEnd() + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 7) + 
oprot.writeI64(self.id) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + if self.colNames is None: + raise TProtocolException(message="Required field colNames is unset!") + if self.engine is None: + raise TProtocolException(message="Required field engine is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionsStatsRequest: + """ + Attributes: + - dbName + - tblName + - colNames + - partNames + - catName + - validWriteIdList + - engine + + """ + + def __init__( + self, + dbName=None, + tblName=None, + colNames=None, + partNames=None, + catName=None, + validWriteIdList=None, + engine=None, + ): + self.dbName = dbName + self.tblName = tblName + self.colNames = colNames + self.partNames = partNames + self.catName = catName + self.validWriteIdList = validWriteIdList + self.engine = engine + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.colNames = [] + (_etype514, _size511) = iprot.readListBegin() + for _i515 in range(_size511): + _elem516 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.colNames.append(_elem516) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.partNames = [] + (_etype520, _size517) = iprot.readListBegin() + for _i521 in range(_size517): + _elem522 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partNames.append(_elem522) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.engine = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def 
write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionsStatsRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 2) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.colNames is not None: + oprot.writeFieldBegin("colNames", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.colNames)) + for iter523 in self.colNames: + oprot.writeString(iter523.encode("utf-8") if sys.version_info[0] == 2 else iter523) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.partNames is not None: + oprot.writeFieldBegin("partNames", TType.LIST, 4) + oprot.writeListBegin(TType.STRING, len(self.partNames)) + for iter524 in self.partNames: + oprot.writeString(iter524.encode("utf-8") if sys.version_info[0] == 2 else iter524) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 5) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 6) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin("engine", TType.STRING, 7) + oprot.writeString(self.engine.encode("utf-8") if sys.version_info[0] == 2 else self.engine) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + if self.colNames is None: + raise TProtocolException(message="Required field colNames is unset!") + if self.partNames is None: + raise TProtocolException(message="Required field partNames is unset!") + if self.engine is None: + raise TProtocolException(message="Required field engine is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AddPartitionsResult: + """ + Attributes: + - partitions + - isStatsCompliant + + """ + + def __init__( + self, + partitions=None, + isStatsCompliant=None, + ): + self.partitions = partitions + self.isStatsCompliant = isStatsCompliant + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.partitions = [] + (_etype528, _size525) = iprot.readListBegin() + for _i529 in range(_size525): + 
_elem530 = Partition() + _elem530.read(iprot) + self.partitions.append(_elem530) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AddPartitionsResult") + if self.partitions is not None: + oprot.writeFieldBegin("partitions", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.partitions)) + for iter531 in self.partitions: + iter531.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin("isStatsCompliant", TType.BOOL, 2) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AddPartitionsRequest: + """ + Attributes: + - dbName + - tblName + - parts + - ifNotExists + - needResult + - catName + - validWriteIdList + + """ + + def __init__( + self, + dbName=None, + tblName=None, + parts=None, + ifNotExists=None, + needResult=True, + catName=None, + validWriteIdList=None, + ): + self.dbName = dbName + self.tblName = tblName + self.parts = parts + self.ifNotExists = ifNotExists + self.needResult = needResult + self.catName = catName + self.validWriteIdList = validWriteIdList + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.parts = [] + (_etype535, _size532) = iprot.readListBegin() + for _i536 in range(_size532): + _elem537 = Partition() + _elem537.read(iprot) + self.parts.append(_elem537) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.ifNotExists = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.BOOL: + self.needResult = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + 
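+        # Illustrative sketch (placeholder values, not part of the generated code):
+        # AddPartitionsRequest defaults needResult=True, while validate() also
+        # requires ifNotExists to be set explicitly:
+        #
+        #     req = AddPartitionsRequest(dbName="db", tblName="tbl",
+        #                                parts=[Partition()], ifNotExists=True)
+        #     req.validate()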
iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AddPartitionsRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 2) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.parts is not None: + oprot.writeFieldBegin("parts", TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.parts)) + for iter538 in self.parts: + iter538.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.ifNotExists is not None: + oprot.writeFieldBegin("ifNotExists", TType.BOOL, 4) + oprot.writeBool(self.ifNotExists) + oprot.writeFieldEnd() + if self.needResult is not None: + oprot.writeFieldBegin("needResult", TType.BOOL, 5) + oprot.writeBool(self.needResult) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 6) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 7) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + if self.parts is None: + raise TProtocolException(message="Required field parts is unset!") + if self.ifNotExists is None: + raise TProtocolException(message="Required field ifNotExists is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class DropPartitionsResult: + """ + Attributes: + - partitions + + """ + + def __init__( + self, + partitions=None, + ): + self.partitions = partitions + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.partitions = [] + (_etype542, _size539) = iprot.readListBegin() + for _i543 in range(_size539): + _elem544 = Partition() + _elem544.read(iprot) + self.partitions.append(_elem544) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, 
self.thrift_spec])) + return + oprot.writeStructBegin("DropPartitionsResult") + if self.partitions is not None: + oprot.writeFieldBegin("partitions", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.partitions)) + for iter545 in self.partitions: + iter545.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class DropPartitionsExpr: + """ + Attributes: + - expr + - partArchiveLevel + + """ + + def __init__( + self, + expr=None, + partArchiveLevel=None, + ): + self.expr = expr + self.partArchiveLevel = partArchiveLevel + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.expr = iprot.readBinary() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.partArchiveLevel = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("DropPartitionsExpr") + if self.expr is not None: + oprot.writeFieldBegin("expr", TType.STRING, 1) + oprot.writeBinary(self.expr) + oprot.writeFieldEnd() + if self.partArchiveLevel is not None: + oprot.writeFieldBegin("partArchiveLevel", TType.I32, 2) + oprot.writeI32(self.partArchiveLevel) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.expr is None: + raise TProtocolException(message="Required field expr is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class RequestPartsSpec: + """ + Attributes: + - names + - exprs + + """ + + def __init__( + self, + names=None, + exprs=None, + ): + self.names = names + self.exprs = exprs + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.names = [] + (_etype549, _size546) = iprot.readListBegin() + for _i550 in range(_size546): + _elem551 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.names.append(_elem551) + iprot.readListEnd() + else: + iprot.skip(ftype) + 
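+        # Note (an assumption inferred from the generated shape, not stated in this
+        # file): RequestPartsSpec mirrors a Thrift union, so callers populate either
+        # `names` (partition names) or `exprs` (DropPartitionsExpr filters), not
+        # both, e.g.:
+        #
+        #     spec = RequestPartsSpec(names=["ds=2023-09-29"])  # placeholder name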
elif fid == 2: + if ftype == TType.LIST: + self.exprs = [] + (_etype555, _size552) = iprot.readListBegin() + for _i556 in range(_size552): + _elem557 = DropPartitionsExpr() + _elem557.read(iprot) + self.exprs.append(_elem557) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("RequestPartsSpec") + if self.names is not None: + oprot.writeFieldBegin("names", TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.names)) + for iter558 in self.names: + oprot.writeString(iter558.encode("utf-8") if sys.version_info[0] == 2 else iter558) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.exprs is not None: + oprot.writeFieldBegin("exprs", TType.LIST, 2) + oprot.writeListBegin(TType.STRUCT, len(self.exprs)) + for iter559 in self.exprs: + iter559.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class DropPartitionsRequest: + """ + Attributes: + - dbName + - tblName + - parts + - deleteData + - ifExists + - ignoreProtection + - environmentContext + - needResult + - catName + + """ + + def __init__( + self, + dbName=None, + tblName=None, + parts=None, + deleteData=None, + ifExists=True, + ignoreProtection=None, + environmentContext=None, + needResult=True, + catName=None, + ): + self.dbName = dbName + self.tblName = tblName + self.parts = parts + self.deleteData = deleteData + self.ifExists = ifExists + self.ignoreProtection = ignoreProtection + self.environmentContext = environmentContext + self.needResult = needResult + self.catName = catName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.parts = RequestPartsSpec() + self.parts.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.BOOL: + self.ifExists = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.BOOL: + self.ignoreProtection = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRUCT: + self.environmentContext = EnvironmentContext() + self.environmentContext.read(iprot) + 
else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.needResult = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("DropPartitionsRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 2) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.parts is not None: + oprot.writeFieldBegin("parts", TType.STRUCT, 3) + self.parts.write(oprot) + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin("deleteData", TType.BOOL, 4) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + if self.ifExists is not None: + oprot.writeFieldBegin("ifExists", TType.BOOL, 5) + oprot.writeBool(self.ifExists) + oprot.writeFieldEnd() + if self.ignoreProtection is not None: + oprot.writeFieldBegin("ignoreProtection", TType.BOOL, 6) + oprot.writeBool(self.ignoreProtection) + oprot.writeFieldEnd() + if self.environmentContext is not None: + oprot.writeFieldBegin("environmentContext", TType.STRUCT, 7) + self.environmentContext.write(oprot) + oprot.writeFieldEnd() + if self.needResult is not None: + oprot.writeFieldBegin("needResult", TType.BOOL, 8) + oprot.writeBool(self.needResult) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 9) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + if self.parts is None: + raise TProtocolException(message="Required field parts is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionValuesRequest: + """ + Attributes: + - dbName + - tblName + - partitionKeys + - applyDistinct + - filter + - partitionOrder + - ascending + - maxParts + - catName + - validWriteIdList + + """ + + def __init__( + self, + dbName=None, + tblName=None, + partitionKeys=None, + applyDistinct=True, + filter=None, + partitionOrder=None, + ascending=True, + maxParts=-1, + catName=None, + validWriteIdList=None, + ): + self.dbName = dbName + self.tblName = tblName + self.partitionKeys = partitionKeys + self.applyDistinct = applyDistinct + self.filter = filter + self.partitionOrder = partitionOrder + self.ascending = ascending + self.maxParts = maxParts + self.catName = catName + 
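+        # Illustrative sketch (placeholder values): dropping partitions by name
+        # with the structs above; validate() requires dbName, tblName and parts:
+        #
+        #     req = DropPartitionsRequest(dbName="db", tblName="tbl",
+        #                                 parts=RequestPartsSpec(names=["ds=1"]),
+        #                                 deleteData=True)
+        #     req.validate()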
self.validWriteIdList = validWriteIdList + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.partitionKeys = [] + (_etype563, _size560) = iprot.readListBegin() + for _i564 in range(_size560): + _elem565 = FieldSchema() + _elem565.read(iprot) + self.partitionKeys.append(_elem565) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.applyDistinct = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.filter = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.partitionOrder = [] + (_etype569, _size566) = iprot.readListBegin() + for _i570 in range(_size566): + _elem571 = FieldSchema() + _elem571.read(iprot) + self.partitionOrder.append(_elem571) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.ascending = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.I64: + self.maxParts = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionValuesRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 2) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.partitionKeys is not None: + oprot.writeFieldBegin("partitionKeys", TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.partitionKeys)) + for iter572 in self.partitionKeys: + iter572.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.applyDistinct is not None: + oprot.writeFieldBegin("applyDistinct", TType.BOOL, 4) + oprot.writeBool(self.applyDistinct) + oprot.writeFieldEnd() + if self.filter is not None: + oprot.writeFieldBegin("filter", TType.STRING, 5) + 
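+        # Note on the defaults above (an assumption based on common Hive Metastore
+        # usage, not documented in this generated file): applyDistinct=True and
+        # ascending=True request all distinct partition values in ascending order,
+        # and maxParts=-1 is the conventional "no limit" sentinel.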
oprot.writeString(self.filter.encode("utf-8") if sys.version_info[0] == 2 else self.filter) + oprot.writeFieldEnd() + if self.partitionOrder is not None: + oprot.writeFieldBegin("partitionOrder", TType.LIST, 6) + oprot.writeListBegin(TType.STRUCT, len(self.partitionOrder)) + for iter573 in self.partitionOrder: + iter573.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.ascending is not None: + oprot.writeFieldBegin("ascending", TType.BOOL, 7) + oprot.writeBool(self.ascending) + oprot.writeFieldEnd() + if self.maxParts is not None: + oprot.writeFieldBegin("maxParts", TType.I64, 8) + oprot.writeI64(self.maxParts) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 9) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 10) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + if self.partitionKeys is None: + raise TProtocolException(message="Required field partitionKeys is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionValuesRow: + """ + Attributes: + - row + + """ + + def __init__( + self, + row=None, + ): + self.row = row + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.row = [] + (_etype577, _size574) = iprot.readListBegin() + for _i578 in range(_size574): + _elem579 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.row.append(_elem579) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionValuesRow") + if self.row is not None: + oprot.writeFieldBegin("row", TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.row)) + for iter580 in self.row: + oprot.writeString(iter580.encode("utf-8") if sys.version_info[0] == 2 else iter580) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.row is None: + raise TProtocolException(message="Required field row is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return 
"{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionValuesResponse: + """ + Attributes: + - partitionValues + + """ + + def __init__( + self, + partitionValues=None, + ): + self.partitionValues = partitionValues + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.partitionValues = [] + (_etype584, _size581) = iprot.readListBegin() + for _i585 in range(_size581): + _elem586 = PartitionValuesRow() + _elem586.read(iprot) + self.partitionValues.append(_elem586) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionValuesResponse") + if self.partitionValues is not None: + oprot.writeFieldBegin("partitionValues", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.partitionValues)) + for iter587 in self.partitionValues: + iter587.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.partitionValues is None: + raise TProtocolException(message="Required field partitionValues is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPartitionsByNamesRequest: + """ + Attributes: + - db_name + - tbl_name + - names + - get_col_stats + - processorCapabilities + - processorIdentifier + - engine + - validWriteIdList + - getFileMetadata + - id + + """ + + def __init__( + self, + db_name=None, + tbl_name=None, + names=None, + get_col_stats=None, + processorCapabilities=None, + processorIdentifier=None, + engine=None, + validWriteIdList=None, + getFileMetadata=None, + id=-1, + ): + self.db_name = db_name + self.tbl_name = tbl_name + self.names = names + self.get_col_stats = get_col_stats + self.processorCapabilities = processorCapabilities + self.processorIdentifier = processorIdentifier + self.engine = engine + self.validWriteIdList = validWriteIdList + self.getFileMetadata = getFileMetadata + self.id = id + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid 
== 2: + if ftype == TType.STRING: + self.tbl_name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.names = [] + (_etype591, _size588) = iprot.readListBegin() + for _i592 in range(_size588): + _elem593 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.names.append(_elem593) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.get_col_stats = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.processorCapabilities = [] + (_etype597, _size594) = iprot.readListBegin() + for _i598 in range(_size594): + _elem599 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.processorCapabilities.append(_elem599) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.processorIdentifier = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.engine = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.BOOL: + self.getFileMetadata = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPartitionsByNamesRequest") + if self.db_name is not None: + oprot.writeFieldBegin("db_name", TType.STRING, 1) + oprot.writeString(self.db_name.encode("utf-8") if sys.version_info[0] == 2 else self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin("tbl_name", TType.STRING, 2) + oprot.writeString(self.tbl_name.encode("utf-8") if sys.version_info[0] == 2 else self.tbl_name) + oprot.writeFieldEnd() + if self.names is not None: + oprot.writeFieldBegin("names", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.names)) + for iter600 in self.names: + oprot.writeString(iter600.encode("utf-8") if sys.version_info[0] == 2 else iter600) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.get_col_stats is not None: + oprot.writeFieldBegin("get_col_stats", TType.BOOL, 4) + oprot.writeBool(self.get_col_stats) + oprot.writeFieldEnd() + if self.processorCapabilities is not None: + oprot.writeFieldBegin("processorCapabilities", TType.LIST, 5) + oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) + for iter601 in self.processorCapabilities: + oprot.writeString(iter601.encode("utf-8") if sys.version_info[0] == 2 else iter601) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.processorIdentifier is not None: + oprot.writeFieldBegin("processorIdentifier", TType.STRING, 6) + 
oprot.writeString(self.processorIdentifier.encode("utf-8") if sys.version_info[0] == 2 else self.processorIdentifier) + oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin("engine", TType.STRING, 7) + oprot.writeString(self.engine.encode("utf-8") if sys.version_info[0] == 2 else self.engine) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 8) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.getFileMetadata is not None: + oprot.writeFieldBegin("getFileMetadata", TType.BOOL, 9) + oprot.writeBool(self.getFileMetadata) + oprot.writeFieldEnd() + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 10) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.db_name is None: + raise TProtocolException(message="Required field db_name is unset!") + if self.tbl_name is None: + raise TProtocolException(message="Required field tbl_name is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPartitionsByNamesResult: + """ + Attributes: + - partitions + - dictionary + + """ + + def __init__( + self, + partitions=None, + dictionary=None, + ): + self.partitions = partitions + self.dictionary = dictionary + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.partitions = [] + (_etype605, _size602) = iprot.readListBegin() + for _i606 in range(_size602): + _elem607 = Partition() + _elem607.read(iprot) + self.partitions.append(_elem607) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.dictionary = ObjectDictionary() + self.dictionary.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPartitionsByNamesResult") + if self.partitions is not None: + oprot.writeFieldBegin("partitions", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.partitions)) + for iter608 in self.partitions: + iter608.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.dictionary is not None: + oprot.writeFieldBegin("dictionary", TType.STRUCT, 2) + self.dictionary.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.partitions is None: + raise TProtocolException(message="Required field partitions is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) 
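+        # Illustrative sketch (placeholder values): only db_name and tbl_name are
+        # required by GetPartitionsByNamesRequest.validate(); a typical call also
+        # passes the partition names to fetch:
+        #
+        #     req = GetPartitionsByNamesRequest(db_name="db", tbl_name="tbl",
+        #                                       names=["ds=2023-09-29"])
+        #     req.validate()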
+ + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class DataConnector: + """ + Attributes: + - name + - type + - url + - description + - parameters + - ownerName + - ownerType + - createTime + + """ + + def __init__( + self, + name=None, + type=None, + url=None, + description=None, + parameters=None, + ownerName=None, + ownerType=None, + createTime=None, + ): + self.name = name + self.type = type + self.url = url + self.description = description + self.parameters = parameters + self.ownerName = ownerName + self.ownerType = ownerType + self.createTime = createTime + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.type = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.url = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.description = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.MAP: + self.parameters = {} + (_ktype610, _vtype611, _size609) = iprot.readMapBegin() + for _i613 in range(_size609): + _key614 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val615 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.parameters[_key614] = _val615 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.ownerName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.ownerType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.I32: + self.createTime = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("DataConnector") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.STRING, 2) + oprot.writeString(self.type.encode("utf-8") if sys.version_info[0] == 2 else self.type) + oprot.writeFieldEnd() + if self.url is not None: + oprot.writeFieldBegin("url", TType.STRING, 3) + 
oprot.writeString(self.url.encode("utf-8") if sys.version_info[0] == 2 else self.url) + oprot.writeFieldEnd() + if self.description is not None: + oprot.writeFieldBegin("description", TType.STRING, 4) + oprot.writeString(self.description.encode("utf-8") if sys.version_info[0] == 2 else self.description) + oprot.writeFieldEnd() + if self.parameters is not None: + oprot.writeFieldBegin("parameters", TType.MAP, 5) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) + for kiter616, viter617 in self.parameters.items(): + oprot.writeString(kiter616.encode("utf-8") if sys.version_info[0] == 2 else kiter616) + oprot.writeString(viter617.encode("utf-8") if sys.version_info[0] == 2 else viter617) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.ownerName is not None: + oprot.writeFieldBegin("ownerName", TType.STRING, 6) + oprot.writeString(self.ownerName.encode("utf-8") if sys.version_info[0] == 2 else self.ownerName) + oprot.writeFieldEnd() + if self.ownerType is not None: + oprot.writeFieldBegin("ownerType", TType.I32, 7) + oprot.writeI32(self.ownerType) + oprot.writeFieldEnd() + if self.createTime is not None: + oprot.writeFieldBegin("createTime", TType.I32, 8) + oprot.writeI32(self.createTime) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ResourceUri: + """ + Attributes: + - resourceType + - uri + + """ + + def __init__( + self, + resourceType=None, + uri=None, + ): + self.resourceType = resourceType + self.uri = uri + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.resourceType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.uri = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ResourceUri") + if self.resourceType is not None: + oprot.writeFieldBegin("resourceType", TType.I32, 1) + oprot.writeI32(self.resourceType) + oprot.writeFieldEnd() + if self.uri is not None: + oprot.writeFieldBegin("uri", TType.STRING, 2) + oprot.writeString(self.uri.encode("utf-8") if sys.version_info[0] == 2 else self.uri) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def 
__ne__(self, other): + return not (self == other) + + +class Function: + """ + Attributes: + - functionName + - dbName + - className + - ownerName + - ownerType + - createTime + - functionType + - resourceUris + - catName + + """ + + def __init__( + self, + functionName=None, + dbName=None, + className=None, + ownerName=None, + ownerType=None, + createTime=None, + functionType=None, + resourceUris=None, + catName=None, + ): + self.functionName = functionName + self.dbName = dbName + self.className = className + self.ownerName = ownerName + self.ownerType = ownerType + self.createTime = createTime + self.functionType = functionType + self.resourceUris = resourceUris + self.catName = catName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.functionName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.className = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.ownerName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.ownerType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: + self.createTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.functionType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.LIST: + self.resourceUris = [] + (_etype621, _size618) = iprot.readListBegin() + for _i622 in range(_size618): + _elem623 = ResourceUri() + _elem623.read(iprot) + self.resourceUris.append(_elem623) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Function") + if self.functionName is not None: + oprot.writeFieldBegin("functionName", TType.STRING, 1) + oprot.writeString(self.functionName.encode("utf-8") if sys.version_info[0] == 2 else self.functionName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.className is not None: + oprot.writeFieldBegin("className", TType.STRING, 3) + oprot.writeString(self.className.encode("utf-8") 
if sys.version_info[0] == 2 else self.className) + oprot.writeFieldEnd() + if self.ownerName is not None: + oprot.writeFieldBegin("ownerName", TType.STRING, 4) + oprot.writeString(self.ownerName.encode("utf-8") if sys.version_info[0] == 2 else self.ownerName) + oprot.writeFieldEnd() + if self.ownerType is not None: + oprot.writeFieldBegin("ownerType", TType.I32, 5) + oprot.writeI32(self.ownerType) + oprot.writeFieldEnd() + if self.createTime is not None: + oprot.writeFieldBegin("createTime", TType.I32, 6) + oprot.writeI32(self.createTime) + oprot.writeFieldEnd() + if self.functionType is not None: + oprot.writeFieldBegin("functionType", TType.I32, 7) + oprot.writeI32(self.functionType) + oprot.writeFieldEnd() + if self.resourceUris is not None: + oprot.writeFieldBegin("resourceUris", TType.LIST, 8) + oprot.writeListBegin(TType.STRUCT, len(self.resourceUris)) + for iter624 in self.resourceUris: + iter624.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 9) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class TxnInfo: + """ + Attributes: + - id + - state + - user + - hostname + - agentInfo + - heartbeatCount + - metaInfo + - startedTime + - lastHeartbeatTime + + """ + + def __init__( + self, + id=None, + state=None, + user=None, + hostname=None, + agentInfo="Unknown", + heartbeatCount=0, + metaInfo=None, + startedTime=None, + lastHeartbeatTime=None, + ): + self.id = id + self.state = state + self.user = user + self.hostname = hostname + self.agentInfo = agentInfo + self.heartbeatCount = heartbeatCount + self.metaInfo = metaInfo + self.startedTime = startedTime + self.lastHeartbeatTime = lastHeartbeatTime + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.state = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.user = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.hostname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.agentInfo = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: + self.heartbeatCount = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + 
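+        # Illustrative sketch (placeholder values; resourceType=1 is assumed to
+        # mean JAR in the accompanying ResourceType enum, defined elsewhere in
+        # this module):
+        #
+        #     fn = Function(functionName="my_udf", dbName="db",
+        #                   className="com.example.MyUDF",
+        #                   resourceUris=[ResourceUri(resourceType=1,
+        #                                             uri="s3://bucket/udf.jar")])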
self.metaInfo = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.I64: + self.startedTime = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.I64: + self.lastHeartbeatTime = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("TxnInfo") + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 1) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + if self.state is not None: + oprot.writeFieldBegin("state", TType.I32, 2) + oprot.writeI32(self.state) + oprot.writeFieldEnd() + if self.user is not None: + oprot.writeFieldBegin("user", TType.STRING, 3) + oprot.writeString(self.user.encode("utf-8") if sys.version_info[0] == 2 else self.user) + oprot.writeFieldEnd() + if self.hostname is not None: + oprot.writeFieldBegin("hostname", TType.STRING, 4) + oprot.writeString(self.hostname.encode("utf-8") if sys.version_info[0] == 2 else self.hostname) + oprot.writeFieldEnd() + if self.agentInfo is not None: + oprot.writeFieldBegin("agentInfo", TType.STRING, 5) + oprot.writeString(self.agentInfo.encode("utf-8") if sys.version_info[0] == 2 else self.agentInfo) + oprot.writeFieldEnd() + if self.heartbeatCount is not None: + oprot.writeFieldBegin("heartbeatCount", TType.I32, 6) + oprot.writeI32(self.heartbeatCount) + oprot.writeFieldEnd() + if self.metaInfo is not None: + oprot.writeFieldBegin("metaInfo", TType.STRING, 7) + oprot.writeString(self.metaInfo.encode("utf-8") if sys.version_info[0] == 2 else self.metaInfo) + oprot.writeFieldEnd() + if self.startedTime is not None: + oprot.writeFieldBegin("startedTime", TType.I64, 8) + oprot.writeI64(self.startedTime) + oprot.writeFieldEnd() + if self.lastHeartbeatTime is not None: + oprot.writeFieldBegin("lastHeartbeatTime", TType.I64, 9) + oprot.writeI64(self.lastHeartbeatTime) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.id is None: + raise TProtocolException(message="Required field id is unset!") + if self.state is None: + raise TProtocolException(message="Required field state is unset!") + if self.user is None: + raise TProtocolException(message="Required field user is unset!") + if self.hostname is None: + raise TProtocolException(message="Required field hostname is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetOpenTxnsInfoResponse: + """ + Attributes: + - txn_high_water_mark + - open_txns + + """ + + def __init__( + self, + txn_high_water_mark=None, + open_txns=None, + ): + self.txn_high_water_mark = txn_high_water_mark + self.open_txns = open_txns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, 
ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.txn_high_water_mark = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.open_txns = [] + (_etype628, _size625) = iprot.readListBegin() + for _i629 in range(_size625): + _elem630 = TxnInfo() + _elem630.read(iprot) + self.open_txns.append(_elem630) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetOpenTxnsInfoResponse") + if self.txn_high_water_mark is not None: + oprot.writeFieldBegin("txn_high_water_mark", TType.I64, 1) + oprot.writeI64(self.txn_high_water_mark) + oprot.writeFieldEnd() + if self.open_txns is not None: + oprot.writeFieldBegin("open_txns", TType.LIST, 2) + oprot.writeListBegin(TType.STRUCT, len(self.open_txns)) + for iter631 in self.open_txns: + iter631.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txn_high_water_mark is None: + raise TProtocolException(message="Required field txn_high_water_mark is unset!") + if self.open_txns is None: + raise TProtocolException(message="Required field open_txns is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetOpenTxnsResponse: + """ + Attributes: + - txn_high_water_mark + - open_txns + - min_open_txn + - abortedBits + + """ + + def __init__( + self, + txn_high_water_mark=None, + open_txns=None, + min_open_txn=None, + abortedBits=None, + ): + self.txn_high_water_mark = txn_high_water_mark + self.open_txns = open_txns + self.min_open_txn = min_open_txn + self.abortedBits = abortedBits + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.txn_high_water_mark = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.open_txns = [] + (_etype635, _size632) = iprot.readListBegin() + for _i636 in range(_size632): + _elem637 = iprot.readI64() + self.open_txns.append(_elem637) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.min_open_txn = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.abortedBits = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetOpenTxnsResponse") + if self.txn_high_water_mark is not None: + 
oprot.writeFieldBegin("txn_high_water_mark", TType.I64, 1) + oprot.writeI64(self.txn_high_water_mark) + oprot.writeFieldEnd() + if self.open_txns is not None: + oprot.writeFieldBegin("open_txns", TType.LIST, 2) + oprot.writeListBegin(TType.I64, len(self.open_txns)) + for iter638 in self.open_txns: + oprot.writeI64(iter638) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.min_open_txn is not None: + oprot.writeFieldBegin("min_open_txn", TType.I64, 3) + oprot.writeI64(self.min_open_txn) + oprot.writeFieldEnd() + if self.abortedBits is not None: + oprot.writeFieldBegin("abortedBits", TType.STRING, 4) + oprot.writeBinary(self.abortedBits) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txn_high_water_mark is None: + raise TProtocolException(message="Required field txn_high_water_mark is unset!") + if self.open_txns is None: + raise TProtocolException(message="Required field open_txns is unset!") + if self.abortedBits is None: + raise TProtocolException(message="Required field abortedBits is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class OpenTxnRequest: + """ + Attributes: + - num_txns + - user + - hostname + - agentInfo + - replPolicy + - replSrcTxnIds + - txn_type + + """ + + def __init__( + self, + num_txns=None, + user=None, + hostname=None, + agentInfo="Unknown", + replPolicy=None, + replSrcTxnIds=None, + txn_type=0, + ): + self.num_txns = num_txns + self.user = user + self.hostname = hostname + self.agentInfo = agentInfo + self.replPolicy = replPolicy + self.replSrcTxnIds = replSrcTxnIds + self.txn_type = txn_type + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.num_txns = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.user = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.hostname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.agentInfo = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.replPolicy = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.replSrcTxnIds = [] + (_etype642, _size639) = iprot.readListBegin() + for _i643 in range(_size639): + _elem644 = iprot.readI64() + self.replSrcTxnIds.append(_elem644) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.txn_type = iprot.readI32() + 
else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("OpenTxnRequest") + if self.num_txns is not None: + oprot.writeFieldBegin("num_txns", TType.I32, 1) + oprot.writeI32(self.num_txns) + oprot.writeFieldEnd() + if self.user is not None: + oprot.writeFieldBegin("user", TType.STRING, 2) + oprot.writeString(self.user.encode("utf-8") if sys.version_info[0] == 2 else self.user) + oprot.writeFieldEnd() + if self.hostname is not None: + oprot.writeFieldBegin("hostname", TType.STRING, 3) + oprot.writeString(self.hostname.encode("utf-8") if sys.version_info[0] == 2 else self.hostname) + oprot.writeFieldEnd() + if self.agentInfo is not None: + oprot.writeFieldBegin("agentInfo", TType.STRING, 4) + oprot.writeString(self.agentInfo.encode("utf-8") if sys.version_info[0] == 2 else self.agentInfo) + oprot.writeFieldEnd() + if self.replPolicy is not None: + oprot.writeFieldBegin("replPolicy", TType.STRING, 5) + oprot.writeString(self.replPolicy.encode("utf-8") if sys.version_info[0] == 2 else self.replPolicy) + oprot.writeFieldEnd() + if self.replSrcTxnIds is not None: + oprot.writeFieldBegin("replSrcTxnIds", TType.LIST, 6) + oprot.writeListBegin(TType.I64, len(self.replSrcTxnIds)) + for iter645 in self.replSrcTxnIds: + oprot.writeI64(iter645) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.txn_type is not None: + oprot.writeFieldBegin("txn_type", TType.I32, 7) + oprot.writeI32(self.txn_type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.num_txns is None: + raise TProtocolException(message="Required field num_txns is unset!") + if self.user is None: + raise TProtocolException(message="Required field user is unset!") + if self.hostname is None: + raise TProtocolException(message="Required field hostname is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class OpenTxnsResponse: + """ + Attributes: + - txn_ids + + """ + + def __init__( + self, + txn_ids=None, + ): + self.txn_ids = txn_ids + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.txn_ids = [] + (_etype649, _size646) = iprot.readListBegin() + for _i650 in range(_size646): + _elem651 = iprot.readI64() + self.txn_ids.append(_elem651) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("OpenTxnsResponse") + if self.txn_ids is not None: + oprot.writeFieldBegin("txn_ids", TType.LIST, 1) + 
oprot.writeListBegin(TType.I64, len(self.txn_ids)) + for iter652 in self.txn_ids: + oprot.writeI64(iter652) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txn_ids is None: + raise TProtocolException(message="Required field txn_ids is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AbortTxnRequest: + """ + Attributes: + - txnid + - replPolicy + - txn_type + + """ + + def __init__( + self, + txnid=None, + replPolicy=None, + txn_type=None, + ): + self.txnid = txnid + self.replPolicy = replPolicy + self.txn_type = txn_type + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.txnid = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.replPolicy = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.txn_type = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AbortTxnRequest") + if self.txnid is not None: + oprot.writeFieldBegin("txnid", TType.I64, 1) + oprot.writeI64(self.txnid) + oprot.writeFieldEnd() + if self.replPolicy is not None: + oprot.writeFieldBegin("replPolicy", TType.STRING, 2) + oprot.writeString(self.replPolicy.encode("utf-8") if sys.version_info[0] == 2 else self.replPolicy) + oprot.writeFieldEnd() + if self.txn_type is not None: + oprot.writeFieldBegin("txn_type", TType.I32, 3) + oprot.writeI32(self.txn_type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnid is None: + raise TProtocolException(message="Required field txnid is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AbortTxnsRequest: + """ + Attributes: + - txn_ids + + """ + + def __init__( + self, + txn_ids=None, + ): + self.txn_ids = txn_ids + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + 
self.txn_ids = [] + (_etype656, _size653) = iprot.readListBegin() + for _i657 in range(_size653): + _elem658 = iprot.readI64() + self.txn_ids.append(_elem658) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AbortTxnsRequest") + if self.txn_ids is not None: + oprot.writeFieldBegin("txn_ids", TType.LIST, 1) + oprot.writeListBegin(TType.I64, len(self.txn_ids)) + for iter659 in self.txn_ids: + oprot.writeI64(iter659) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txn_ids is None: + raise TProtocolException(message="Required field txn_ids is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CommitTxnKeyValue: + """ + Attributes: + - tableId + - key + - value + + """ + + def __init__( + self, + tableId=None, + key=None, + value=None, + ): + self.tableId = tableId + self.key = key + self.value = value + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.tableId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.key = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.value = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CommitTxnKeyValue") + if self.tableId is not None: + oprot.writeFieldBegin("tableId", TType.I64, 1) + oprot.writeI64(self.tableId) + oprot.writeFieldEnd() + if self.key is not None: + oprot.writeFieldBegin("key", TType.STRING, 2) + oprot.writeString(self.key.encode("utf-8") if sys.version_info[0] == 2 else self.key) + oprot.writeFieldEnd() + if self.value is not None: + oprot.writeFieldBegin("value", TType.STRING, 3) + oprot.writeString(self.value.encode("utf-8") if sys.version_info[0] == 2 else self.value) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.tableId is None: + raise TProtocolException(message="Required field tableId is unset!") + if self.key is None: + raise TProtocolException(message="Required field key is unset!") + if self.value is None: + raise TProtocolException(message="Required field value is unset!") + return + 
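+ # Note: the generated write() methods in this module never call validate();
+ # required-field checks are opt-in for callers before serializing.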
+ def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WriteEventInfo: + """ + Attributes: + - writeId + - database + - table + - files + - partition + - tableObj + - partitionObj + + """ + + def __init__( + self, + writeId=None, + database=None, + table=None, + files=None, + partition=None, + tableObj=None, + partitionObj=None, + ): + self.writeId = writeId + self.database = database + self.table = table + self.files = files + self.partition = partition + self.tableObj = tableObj + self.partitionObj = partitionObj + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.database = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.table = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.files = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.partition = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.tableObj = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.partitionObj = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WriteEventInfo") + if self.writeId is not None: + oprot.writeFieldBegin("writeId", TType.I64, 1) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.database is not None: + oprot.writeFieldBegin("database", TType.STRING, 2) + oprot.writeString(self.database.encode("utf-8") if sys.version_info[0] == 2 else self.database) + oprot.writeFieldEnd() + if self.table is not None: + oprot.writeFieldBegin("table", TType.STRING, 3) + oprot.writeString(self.table.encode("utf-8") if sys.version_info[0] == 2 else self.table) + oprot.writeFieldEnd() + if self.files is not None: + oprot.writeFieldBegin("files", TType.STRING, 4) + oprot.writeString(self.files.encode("utf-8") if sys.version_info[0] == 2 else self.files) + oprot.writeFieldEnd() + if 
self.partition is not None: + oprot.writeFieldBegin("partition", TType.STRING, 5) + oprot.writeString(self.partition.encode("utf-8") if sys.version_info[0] == 2 else self.partition) + oprot.writeFieldEnd() + if self.tableObj is not None: + oprot.writeFieldBegin("tableObj", TType.STRING, 6) + oprot.writeString(self.tableObj.encode("utf-8") if sys.version_info[0] == 2 else self.tableObj) + oprot.writeFieldEnd() + if self.partitionObj is not None: + oprot.writeFieldBegin("partitionObj", TType.STRING, 7) + oprot.writeString(self.partitionObj.encode("utf-8") if sys.version_info[0] == 2 else self.partitionObj) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.writeId is None: + raise TProtocolException(message="Required field writeId is unset!") + if self.database is None: + raise TProtocolException(message="Required field database is unset!") + if self.table is None: + raise TProtocolException(message="Required field table is unset!") + if self.files is None: + raise TProtocolException(message="Required field files is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ReplLastIdInfo: + """ + Attributes: + - database + - lastReplId + - table + - catalog + - partitionList + + """ + + def __init__( + self, + database=None, + lastReplId=None, + table=None, + catalog=None, + partitionList=None, + ): + self.database = database + self.lastReplId = lastReplId + self.table = table + self.catalog = catalog + self.partitionList = partitionList + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.database = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.lastReplId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.table = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catalog = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.partitionList = [] + (_etype663, _size660) = iprot.readListBegin() + for _i664 in range(_size660): + _elem665 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partitionList.append(_elem665) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
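+ # Fallback path: when the accelerated protocol is unavailable (_fast_encode
+ # is typically supplied by thrift's C fastbinary extension), each field is
+ # emitted individually, framed by writeFieldBegin/writeFieldEnd and
+ # terminated with writeFieldStop.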
oprot.writeStructBegin("ReplLastIdInfo") + if self.database is not None: + oprot.writeFieldBegin("database", TType.STRING, 1) + oprot.writeString(self.database.encode("utf-8") if sys.version_info[0] == 2 else self.database) + oprot.writeFieldEnd() + if self.lastReplId is not None: + oprot.writeFieldBegin("lastReplId", TType.I64, 2) + oprot.writeI64(self.lastReplId) + oprot.writeFieldEnd() + if self.table is not None: + oprot.writeFieldBegin("table", TType.STRING, 3) + oprot.writeString(self.table.encode("utf-8") if sys.version_info[0] == 2 else self.table) + oprot.writeFieldEnd() + if self.catalog is not None: + oprot.writeFieldBegin("catalog", TType.STRING, 4) + oprot.writeString(self.catalog.encode("utf-8") if sys.version_info[0] == 2 else self.catalog) + oprot.writeFieldEnd() + if self.partitionList is not None: + oprot.writeFieldBegin("partitionList", TType.LIST, 5) + oprot.writeListBegin(TType.STRING, len(self.partitionList)) + for iter666 in self.partitionList: + oprot.writeString(iter666.encode("utf-8") if sys.version_info[0] == 2 else iter666) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.database is None: + raise TProtocolException(message="Required field database is unset!") + if self.lastReplId is None: + raise TProtocolException(message="Required field lastReplId is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class UpdateTransactionalStatsRequest: + """ + Attributes: + - tableId + - insertCount + - updatedCount + - deletedCount + + """ + + def __init__( + self, + tableId=None, + insertCount=None, + updatedCount=None, + deletedCount=None, + ): + self.tableId = tableId + self.insertCount = insertCount + self.updatedCount = updatedCount + self.deletedCount = deletedCount + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.tableId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.insertCount = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.updatedCount = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.deletedCount = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("UpdateTransactionalStatsRequest") + if self.tableId is not None: + oprot.writeFieldBegin("tableId", TType.I64, 1) + oprot.writeI64(self.tableId) + oprot.writeFieldEnd() + if self.insertCount is not None: + oprot.writeFieldBegin("insertCount", TType.I64, 2) + oprot.writeI64(self.insertCount) + oprot.writeFieldEnd() + if self.updatedCount is not None: + 
oprot.writeFieldBegin("updatedCount", TType.I64, 3) + oprot.writeI64(self.updatedCount) + oprot.writeFieldEnd() + if self.deletedCount is not None: + oprot.writeFieldBegin("deletedCount", TType.I64, 4) + oprot.writeI64(self.deletedCount) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.tableId is None: + raise TProtocolException(message="Required field tableId is unset!") + if self.insertCount is None: + raise TProtocolException(message="Required field insertCount is unset!") + if self.updatedCount is None: + raise TProtocolException(message="Required field updatedCount is unset!") + if self.deletedCount is None: + raise TProtocolException(message="Required field deletedCount is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CommitTxnRequest: + """ + Attributes: + - txnid + - replPolicy + - writeEventInfos + - replLastIdInfo + - keyValue + - exclWriteEnabled + - txn_type + + """ + + def __init__( + self, + txnid=None, + replPolicy=None, + writeEventInfos=None, + replLastIdInfo=None, + keyValue=None, + exclWriteEnabled=True, + txn_type=None, + ): + self.txnid = txnid + self.replPolicy = replPolicy + self.writeEventInfos = writeEventInfos + self.replLastIdInfo = replLastIdInfo + self.keyValue = keyValue + self.exclWriteEnabled = exclWriteEnabled + self.txn_type = txn_type + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.txnid = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.replPolicy = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.writeEventInfos = [] + (_etype670, _size667) = iprot.readListBegin() + for _i671 in range(_size667): + _elem672 = WriteEventInfo() + _elem672.read(iprot) + self.writeEventInfos.append(_elem672) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.replLastIdInfo = ReplLastIdInfo() + self.replLastIdInfo.read(iprot) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.keyValue = CommitTxnKeyValue() + self.keyValue.read(iprot) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.BOOL: + self.exclWriteEnabled = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.txn_type = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CommitTxnRequest") + if self.txnid is not None: + oprot.writeFieldBegin("txnid", TType.I64, 1) + oprot.writeI64(self.txnid) + 
oprot.writeFieldEnd() + if self.replPolicy is not None: + oprot.writeFieldBegin("replPolicy", TType.STRING, 2) + oprot.writeString(self.replPolicy.encode("utf-8") if sys.version_info[0] == 2 else self.replPolicy) + oprot.writeFieldEnd() + if self.writeEventInfos is not None: + oprot.writeFieldBegin("writeEventInfos", TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.writeEventInfos)) + for iter673 in self.writeEventInfos: + iter673.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.replLastIdInfo is not None: + oprot.writeFieldBegin("replLastIdInfo", TType.STRUCT, 4) + self.replLastIdInfo.write(oprot) + oprot.writeFieldEnd() + if self.keyValue is not None: + oprot.writeFieldBegin("keyValue", TType.STRUCT, 5) + self.keyValue.write(oprot) + oprot.writeFieldEnd() + if self.exclWriteEnabled is not None: + oprot.writeFieldBegin("exclWriteEnabled", TType.BOOL, 6) + oprot.writeBool(self.exclWriteEnabled) + oprot.writeFieldEnd() + if self.txn_type is not None: + oprot.writeFieldBegin("txn_type", TType.I32, 7) + oprot.writeI32(self.txn_type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnid is None: + raise TProtocolException(message="Required field txnid is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ReplTblWriteIdStateRequest: + """ + Attributes: + - validWriteIdlist + - user + - hostName + - dbName + - tableName + - partNames + + """ + + def __init__( + self, + validWriteIdlist=None, + user=None, + hostName=None, + dbName=None, + tableName=None, + partNames=None, + ): + self.validWriteIdlist = validWriteIdlist + self.user = user + self.hostName = hostName + self.dbName = dbName + self.tableName = tableName + self.partNames = partNames + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.validWriteIdlist = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.user = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.hostName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.partNames = [] + (_etype677, _size674) = 
iprot.readListBegin() + for _i678 in range(_size674): + _elem679 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partNames.append(_elem679) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ReplTblWriteIdStateRequest") + if self.validWriteIdlist is not None: + oprot.writeFieldBegin("validWriteIdlist", TType.STRING, 1) + oprot.writeString(self.validWriteIdlist.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdlist) + oprot.writeFieldEnd() + if self.user is not None: + oprot.writeFieldBegin("user", TType.STRING, 2) + oprot.writeString(self.user.encode("utf-8") if sys.version_info[0] == 2 else self.user) + oprot.writeFieldEnd() + if self.hostName is not None: + oprot.writeFieldBegin("hostName", TType.STRING, 3) + oprot.writeString(self.hostName.encode("utf-8") if sys.version_info[0] == 2 else self.hostName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 4) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 5) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.partNames is not None: + oprot.writeFieldBegin("partNames", TType.LIST, 6) + oprot.writeListBegin(TType.STRING, len(self.partNames)) + for iter680 in self.partNames: + oprot.writeString(iter680.encode("utf-8") if sys.version_info[0] == 2 else iter680) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.validWriteIdlist is None: + raise TProtocolException(message="Required field validWriteIdlist is unset!") + if self.user is None: + raise TProtocolException(message="Required field user is unset!") + if self.hostName is None: + raise TProtocolException(message="Required field hostName is unset!") + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tableName is None: + raise TProtocolException(message="Required field tableName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetValidWriteIdsRequest: + """ + Attributes: + - fullTableNames + - validTxnList + - writeId + + """ + + def __init__( + self, + fullTableNames=None, + validTxnList=None, + writeId=None, + ): + self.fullTableNames = fullTableNames + self.validTxnList = validTxnList + self.writeId = writeId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if 
ftype == TType.LIST: + self.fullTableNames = [] + (_etype684, _size681) = iprot.readListBegin() + for _i685 in range(_size681): + _elem686 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.fullTableNames.append(_elem686) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.validTxnList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetValidWriteIdsRequest") + if self.fullTableNames is not None: + oprot.writeFieldBegin("fullTableNames", TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.fullTableNames)) + for iter687 in self.fullTableNames: + oprot.writeString(iter687.encode("utf-8") if sys.version_info[0] == 2 else iter687) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.validTxnList is not None: + oprot.writeFieldBegin("validTxnList", TType.STRING, 2) + oprot.writeString(self.validTxnList.encode("utf-8") if sys.version_info[0] == 2 else self.validTxnList) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin("writeId", TType.I64, 3) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fullTableNames is None: + raise TProtocolException(message="Required field fullTableNames is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class TableValidWriteIds: + """ + Attributes: + - fullTableName + - writeIdHighWaterMark + - invalidWriteIds + - minOpenWriteId + - abortedBits + + """ + + def __init__( + self, + fullTableName=None, + writeIdHighWaterMark=None, + invalidWriteIds=None, + minOpenWriteId=None, + abortedBits=None, + ): + self.fullTableName = fullTableName + self.writeIdHighWaterMark = writeIdHighWaterMark + self.invalidWriteIds = invalidWriteIds + self.minOpenWriteId = minOpenWriteId + self.abortedBits = abortedBits + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.fullTableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.writeIdHighWaterMark = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.invalidWriteIds = [] + (_etype691, _size688) = iprot.readListBegin() + for _i692 in range(_size688): + _elem693 = 
iprot.readI64() + self.invalidWriteIds.append(_elem693) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.minOpenWriteId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.abortedBits = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("TableValidWriteIds") + if self.fullTableName is not None: + oprot.writeFieldBegin("fullTableName", TType.STRING, 1) + oprot.writeString(self.fullTableName.encode("utf-8") if sys.version_info[0] == 2 else self.fullTableName) + oprot.writeFieldEnd() + if self.writeIdHighWaterMark is not None: + oprot.writeFieldBegin("writeIdHighWaterMark", TType.I64, 2) + oprot.writeI64(self.writeIdHighWaterMark) + oprot.writeFieldEnd() + if self.invalidWriteIds is not None: + oprot.writeFieldBegin("invalidWriteIds", TType.LIST, 3) + oprot.writeListBegin(TType.I64, len(self.invalidWriteIds)) + for iter694 in self.invalidWriteIds: + oprot.writeI64(iter694) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.minOpenWriteId is not None: + oprot.writeFieldBegin("minOpenWriteId", TType.I64, 4) + oprot.writeI64(self.minOpenWriteId) + oprot.writeFieldEnd() + if self.abortedBits is not None: + oprot.writeFieldBegin("abortedBits", TType.STRING, 5) + oprot.writeBinary(self.abortedBits) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fullTableName is None: + raise TProtocolException(message="Required field fullTableName is unset!") + if self.writeIdHighWaterMark is None: + raise TProtocolException(message="Required field writeIdHighWaterMark is unset!") + if self.invalidWriteIds is None: + raise TProtocolException(message="Required field invalidWriteIds is unset!") + if self.abortedBits is None: + raise TProtocolException(message="Required field abortedBits is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetValidWriteIdsResponse: + """ + Attributes: + - tblValidWriteIds + + """ + + def __init__( + self, + tblValidWriteIds=None, + ): + self.tblValidWriteIds = tblValidWriteIds + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.tblValidWriteIds = [] + (_etype698, _size695) = iprot.readListBegin() + for _i699 in range(_size695): + _elem700 = TableValidWriteIds() + _elem700.read(iprot) + self.tblValidWriteIds.append(_elem700) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + 
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetValidWriteIdsResponse") + if self.tblValidWriteIds is not None: + oprot.writeFieldBegin("tblValidWriteIds", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.tblValidWriteIds)) + for iter701 in self.tblValidWriteIds: + iter701.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.tblValidWriteIds is None: + raise TProtocolException(message="Required field tblValidWriteIds is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class TxnToWriteId: + """ + Attributes: + - txnId + - writeId + + """ + + def __init__( + self, + txnId=None, + writeId=None, + ): + self.txnId = txnId + self.writeId = writeId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("TxnToWriteId") + if self.txnId is not None: + oprot.writeFieldBegin("txnId", TType.I64, 1) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin("writeId", TType.I64, 2) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnId is None: + raise TProtocolException(message="Required field txnId is unset!") + if self.writeId is None: + raise TProtocolException(message="Required field writeId is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AllocateTableWriteIdsRequest: + """ + Attributes: + - dbName + - tableName + - txnIds + - replPolicy + - srcTxnToWriteIdList + + """ + + def __init__( + self, + dbName=None, + tableName=None, + txnIds=None, + replPolicy=None, + srcTxnToWriteIdList=None, + ): + self.dbName = dbName + self.tableName = tableName + self.txnIds = txnIds + self.replPolicy = replPolicy + self.srcTxnToWriteIdList = srcTxnToWriteIdList + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + 
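+ # Pure-Python decode loop: read (name, type, id) field headers until
+ # TType.STOP, dispatch on the field id, and skip unknown ids so older
+ # readers tolerate newer fields (Thrift's schema-evolution behavior).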
iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.txnIds = [] + (_etype705, _size702) = iprot.readListBegin() + for _i706 in range(_size702): + _elem707 = iprot.readI64() + self.txnIds.append(_elem707) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.replPolicy = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.srcTxnToWriteIdList = [] + (_etype711, _size708) = iprot.readListBegin() + for _i712 in range(_size708): + _elem713 = TxnToWriteId() + _elem713.read(iprot) + self.srcTxnToWriteIdList.append(_elem713) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AllocateTableWriteIdsRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 2) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.txnIds is not None: + oprot.writeFieldBegin("txnIds", TType.LIST, 3) + oprot.writeListBegin(TType.I64, len(self.txnIds)) + for iter714 in self.txnIds: + oprot.writeI64(iter714) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.replPolicy is not None: + oprot.writeFieldBegin("replPolicy", TType.STRING, 4) + oprot.writeString(self.replPolicy.encode("utf-8") if sys.version_info[0] == 2 else self.replPolicy) + oprot.writeFieldEnd() + if self.srcTxnToWriteIdList is not None: + oprot.writeFieldBegin("srcTxnToWriteIdList", TType.LIST, 5) + oprot.writeListBegin(TType.STRUCT, len(self.srcTxnToWriteIdList)) + for iter715 in self.srcTxnToWriteIdList: + iter715.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tableName is None: + raise TProtocolException(message="Required field tableName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AllocateTableWriteIdsResponse: + """ + Attributes: + - txnToWriteIds + + """ + + def __init__( + self, + txnToWriteIds=None, + ): + self.txnToWriteIds = txnToWriteIds + + def 
read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.txnToWriteIds = [] + (_etype719, _size716) = iprot.readListBegin() + for _i720 in range(_size716): + _elem721 = TxnToWriteId() + _elem721.read(iprot) + self.txnToWriteIds.append(_elem721) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AllocateTableWriteIdsResponse") + if self.txnToWriteIds is not None: + oprot.writeFieldBegin("txnToWriteIds", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.txnToWriteIds)) + for iter722 in self.txnToWriteIds: + iter722.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnToWriteIds is None: + raise TProtocolException(message="Required field txnToWriteIds is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class MaxAllocatedTableWriteIdRequest: + """ + Attributes: + - dbName + - tableName + + """ + + def __init__( + self, + dbName=None, + tableName=None, + ): + self.dbName = dbName + self.tableName = tableName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("MaxAllocatedTableWriteIdRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 2) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if 
self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tableName is None: + raise TProtocolException(message="Required field tableName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class MaxAllocatedTableWriteIdResponse: + """ + Attributes: + - maxWriteId + + """ + + def __init__( + self, + maxWriteId=None, + ): + self.maxWriteId = maxWriteId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.maxWriteId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("MaxAllocatedTableWriteIdResponse") + if self.maxWriteId is not None: + oprot.writeFieldBegin("maxWriteId", TType.I64, 1) + oprot.writeI64(self.maxWriteId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.maxWriteId is None: + raise TProtocolException(message="Required field maxWriteId is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SeedTableWriteIdsRequest: + """ + Attributes: + - dbName + - tableName + - seedWriteId + + """ + + def __init__( + self, + dbName=None, + tableName=None, + seedWriteId=None, + ): + self.dbName = dbName + self.tableName = tableName + self.seedWriteId = seedWriteId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.seedWriteId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, 
self.thrift_spec])) + return + oprot.writeStructBegin("SeedTableWriteIdsRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 2) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.seedWriteId is not None: + oprot.writeFieldBegin("seedWriteId", TType.I64, 3) + oprot.writeI64(self.seedWriteId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tableName is None: + raise TProtocolException(message="Required field tableName is unset!") + if self.seedWriteId is None: + raise TProtocolException(message="Required field seedWriteId is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SeedTxnIdRequest: + """ + Attributes: + - seedTxnId + + """ + + def __init__( + self, + seedTxnId=None, + ): + self.seedTxnId = seedTxnId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.seedTxnId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SeedTxnIdRequest") + if self.seedTxnId is not None: + oprot.writeFieldBegin("seedTxnId", TType.I64, 1) + oprot.writeI64(self.seedTxnId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.seedTxnId is None: + raise TProtocolException(message="Required field seedTxnId is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class LockComponent: + """ + Attributes: + - type + - level + - dbname + - tablename + - partitionname + - operationType + - isTransactional + - isDynamicPartitionWrite + + """ + + def __init__( + self, + type=None, + level=None, + dbname=None, + tablename=None, + partitionname=None, + operationType=5, + isTransactional=False, + isDynamicPartitionWrite=False, + ): + self.type = type + self.level = level + self.dbname = dbname + self.tablename = tablename + self.partitionname = partitionname + self.operationType = operationType + self.isTransactional = isTransactional + self.isDynamicPartitionWrite = 
isDynamicPartitionWrite + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.level = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.tablename = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.partitionname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: + self.operationType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.isTransactional = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.isDynamicPartitionWrite = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("LockComponent") + if self.type is not None: + oprot.writeFieldBegin("type", TType.I32, 1) + oprot.writeI32(self.type) + oprot.writeFieldEnd() + if self.level is not None: + oprot.writeFieldBegin("level", TType.I32, 2) + oprot.writeI32(self.level) + oprot.writeFieldEnd() + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 3) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tablename is not None: + oprot.writeFieldBegin("tablename", TType.STRING, 4) + oprot.writeString(self.tablename.encode("utf-8") if sys.version_info[0] == 2 else self.tablename) + oprot.writeFieldEnd() + if self.partitionname is not None: + oprot.writeFieldBegin("partitionname", TType.STRING, 5) + oprot.writeString(self.partitionname.encode("utf-8") if sys.version_info[0] == 2 else self.partitionname) + oprot.writeFieldEnd() + if self.operationType is not None: + oprot.writeFieldBegin("operationType", TType.I32, 6) + oprot.writeI32(self.operationType) + oprot.writeFieldEnd() + if self.isTransactional is not None: + oprot.writeFieldBegin("isTransactional", TType.BOOL, 7) + oprot.writeBool(self.isTransactional) + oprot.writeFieldEnd() + if self.isDynamicPartitionWrite is not None: + oprot.writeFieldBegin("isDynamicPartitionWrite", TType.BOOL, 8) + oprot.writeBool(self.isDynamicPartitionWrite) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.type is None: + raise TProtocolException(message="Required field type is unset!") + if self.level is None: + raise TProtocolException(message="Required field level is unset!") + if self.dbname is None: + raise 
TProtocolException(message="Required field dbname is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class LockRequest: + """ + Attributes: + - component + - txnid + - user + - hostname + - agentInfo + - zeroWaitReadEnabled + - exclusiveCTAS + + """ + + def __init__( + self, + component=None, + txnid=None, + user=None, + hostname=None, + agentInfo="Unknown", + zeroWaitReadEnabled=False, + exclusiveCTAS=False, + ): + self.component = component + self.txnid = txnid + self.user = user + self.hostname = hostname + self.agentInfo = agentInfo + self.zeroWaitReadEnabled = zeroWaitReadEnabled + self.exclusiveCTAS = exclusiveCTAS + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.component = [] + (_etype726, _size723) = iprot.readListBegin() + for _i727 in range(_size723): + _elem728 = LockComponent() + _elem728.read(iprot) + self.component.append(_elem728) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.txnid = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.user = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.hostname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.agentInfo = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.BOOL: + self.zeroWaitReadEnabled = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.exclusiveCTAS = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("LockRequest") + if self.component is not None: + oprot.writeFieldBegin("component", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.component)) + for iter729 in self.component: + iter729.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.txnid is not None: + oprot.writeFieldBegin("txnid", TType.I64, 2) + oprot.writeI64(self.txnid) + oprot.writeFieldEnd() + if self.user is not None: + oprot.writeFieldBegin("user", TType.STRING, 3) + oprot.writeString(self.user.encode("utf-8") if sys.version_info[0] == 2 else self.user) + oprot.writeFieldEnd() + if self.hostname is not None: + oprot.writeFieldBegin("hostname", TType.STRING, 4) + oprot.writeString(self.hostname.encode("utf-8") if sys.version_info[0] == 2 
else self.hostname) + oprot.writeFieldEnd() + if self.agentInfo is not None: + oprot.writeFieldBegin("agentInfo", TType.STRING, 5) + oprot.writeString(self.agentInfo.encode("utf-8") if sys.version_info[0] == 2 else self.agentInfo) + oprot.writeFieldEnd() + if self.zeroWaitReadEnabled is not None: + oprot.writeFieldBegin("zeroWaitReadEnabled", TType.BOOL, 6) + oprot.writeBool(self.zeroWaitReadEnabled) + oprot.writeFieldEnd() + if self.exclusiveCTAS is not None: + oprot.writeFieldBegin("exclusiveCTAS", TType.BOOL, 7) + oprot.writeBool(self.exclusiveCTAS) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.component is None: + raise TProtocolException(message="Required field component is unset!") + if self.user is None: + raise TProtocolException(message="Required field user is unset!") + if self.hostname is None: + raise TProtocolException(message="Required field hostname is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class LockResponse: + """ + Attributes: + - lockid + - state + - errorMessage + + """ + + def __init__( + self, + lockid=None, + state=None, + errorMessage=None, + ): + self.lockid = lockid + self.state = state + self.errorMessage = errorMessage + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.lockid = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.state = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.errorMessage = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("LockResponse") + if self.lockid is not None: + oprot.writeFieldBegin("lockid", TType.I64, 1) + oprot.writeI64(self.lockid) + oprot.writeFieldEnd() + if self.state is not None: + oprot.writeFieldBegin("state", TType.I32, 2) + oprot.writeI32(self.state) + oprot.writeFieldEnd() + if self.errorMessage is not None: + oprot.writeFieldBegin("errorMessage", TType.STRING, 3) + oprot.writeString(self.errorMessage.encode("utf-8") if sys.version_info[0] == 2 else self.errorMessage) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.lockid is None: + raise TProtocolException(message="Required field lockid is unset!") + if self.state is None: + raise TProtocolException(message="Required field state is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def 
__eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CheckLockRequest: + """ + Attributes: + - lockid + - txnid + - elapsed_ms + + """ + + def __init__( + self, + lockid=None, + txnid=None, + elapsed_ms=None, + ): + self.lockid = lockid + self.txnid = txnid + self.elapsed_ms = elapsed_ms + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.lockid = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.txnid = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.elapsed_ms = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CheckLockRequest") + if self.lockid is not None: + oprot.writeFieldBegin("lockid", TType.I64, 1) + oprot.writeI64(self.lockid) + oprot.writeFieldEnd() + if self.txnid is not None: + oprot.writeFieldBegin("txnid", TType.I64, 2) + oprot.writeI64(self.txnid) + oprot.writeFieldEnd() + if self.elapsed_ms is not None: + oprot.writeFieldBegin("elapsed_ms", TType.I64, 3) + oprot.writeI64(self.elapsed_ms) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.lockid is None: + raise TProtocolException(message="Required field lockid is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class UnlockRequest: + """ + Attributes: + - lockid + + """ + + def __init__( + self, + lockid=None, + ): + self.lockid = lockid + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.lockid = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("UnlockRequest") + if self.lockid is not None: + oprot.writeFieldBegin("lockid", TType.I64, 1) + oprot.writeI64(self.lockid) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.lockid is None: + raise TProtocolException(message="Required field lockid is unset!") + return + + def __repr__(self): + L = 
["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ShowLocksRequest: + """ + Attributes: + - dbname + - tablename + - partname + - isExtended + - txnid + + """ + + def __init__( + self, + dbname=None, + tablename=None, + partname=None, + isExtended=False, + txnid=None, + ): + self.dbname = dbname + self.tablename = tablename + self.partname = partname + self.isExtended = isExtended + self.txnid = txnid + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tablename = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.partname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.isExtended = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.txnid = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ShowLocksRequest") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tablename is not None: + oprot.writeFieldBegin("tablename", TType.STRING, 2) + oprot.writeString(self.tablename.encode("utf-8") if sys.version_info[0] == 2 else self.tablename) + oprot.writeFieldEnd() + if self.partname is not None: + oprot.writeFieldBegin("partname", TType.STRING, 3) + oprot.writeString(self.partname.encode("utf-8") if sys.version_info[0] == 2 else self.partname) + oprot.writeFieldEnd() + if self.isExtended is not None: + oprot.writeFieldBegin("isExtended", TType.BOOL, 4) + oprot.writeBool(self.isExtended) + oprot.writeFieldEnd() + if self.txnid is not None: + oprot.writeFieldBegin("txnid", TType.I64, 5) + oprot.writeI64(self.txnid) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ShowLocksResponseElement: + """ + Attributes: + - lockid + - dbname + - 
tablename + - partname + - state + - type + - txnid + - lastheartbeat + - acquiredat + - user + - hostname + - heartbeatCount + - agentInfo + - blockedByExtId + - blockedByIntId + - lockIdInternal + + """ + + def __init__( + self, + lockid=None, + dbname=None, + tablename=None, + partname=None, + state=None, + type=None, + txnid=None, + lastheartbeat=None, + acquiredat=None, + user=None, + hostname=None, + heartbeatCount=0, + agentInfo=None, + blockedByExtId=None, + blockedByIntId=None, + lockIdInternal=None, + ): + self.lockid = lockid + self.dbname = dbname + self.tablename = tablename + self.partname = partname + self.state = state + self.type = type + self.txnid = txnid + self.lastheartbeat = lastheartbeat + self.acquiredat = acquiredat + self.user = user + self.hostname = hostname + self.heartbeatCount = heartbeatCount + self.agentInfo = agentInfo + self.blockedByExtId = blockedByExtId + self.blockedByIntId = blockedByIntId + self.lockIdInternal = lockIdInternal + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.lockid = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tablename = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.partname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.state = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: + self.type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I64: + self.txnid = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.I64: + self.lastheartbeat = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.I64: + self.acquiredat = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.user = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.STRING: + self.hostname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 12: + if ftype == TType.I32: + self.heartbeatCount = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 13: + if ftype == TType.STRING: + self.agentInfo = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 14: + if ftype == TType.I64: + self.blockedByExtId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 15: + if ftype == TType.I64: + self.blockedByIntId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 16: + if ftype == 
TType.I64: + self.lockIdInternal = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ShowLocksResponseElement") + if self.lockid is not None: + oprot.writeFieldBegin("lockid", TType.I64, 1) + oprot.writeI64(self.lockid) + oprot.writeFieldEnd() + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 2) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tablename is not None: + oprot.writeFieldBegin("tablename", TType.STRING, 3) + oprot.writeString(self.tablename.encode("utf-8") if sys.version_info[0] == 2 else self.tablename) + oprot.writeFieldEnd() + if self.partname is not None: + oprot.writeFieldBegin("partname", TType.STRING, 4) + oprot.writeString(self.partname.encode("utf-8") if sys.version_info[0] == 2 else self.partname) + oprot.writeFieldEnd() + if self.state is not None: + oprot.writeFieldBegin("state", TType.I32, 5) + oprot.writeI32(self.state) + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.I32, 6) + oprot.writeI32(self.type) + oprot.writeFieldEnd() + if self.txnid is not None: + oprot.writeFieldBegin("txnid", TType.I64, 7) + oprot.writeI64(self.txnid) + oprot.writeFieldEnd() + if self.lastheartbeat is not None: + oprot.writeFieldBegin("lastheartbeat", TType.I64, 8) + oprot.writeI64(self.lastheartbeat) + oprot.writeFieldEnd() + if self.acquiredat is not None: + oprot.writeFieldBegin("acquiredat", TType.I64, 9) + oprot.writeI64(self.acquiredat) + oprot.writeFieldEnd() + if self.user is not None: + oprot.writeFieldBegin("user", TType.STRING, 10) + oprot.writeString(self.user.encode("utf-8") if sys.version_info[0] == 2 else self.user) + oprot.writeFieldEnd() + if self.hostname is not None: + oprot.writeFieldBegin("hostname", TType.STRING, 11) + oprot.writeString(self.hostname.encode("utf-8") if sys.version_info[0] == 2 else self.hostname) + oprot.writeFieldEnd() + if self.heartbeatCount is not None: + oprot.writeFieldBegin("heartbeatCount", TType.I32, 12) + oprot.writeI32(self.heartbeatCount) + oprot.writeFieldEnd() + if self.agentInfo is not None: + oprot.writeFieldBegin("agentInfo", TType.STRING, 13) + oprot.writeString(self.agentInfo.encode("utf-8") if sys.version_info[0] == 2 else self.agentInfo) + oprot.writeFieldEnd() + if self.blockedByExtId is not None: + oprot.writeFieldBegin("blockedByExtId", TType.I64, 14) + oprot.writeI64(self.blockedByExtId) + oprot.writeFieldEnd() + if self.blockedByIntId is not None: + oprot.writeFieldBegin("blockedByIntId", TType.I64, 15) + oprot.writeI64(self.blockedByIntId) + oprot.writeFieldEnd() + if self.lockIdInternal is not None: + oprot.writeFieldBegin("lockIdInternal", TType.I64, 16) + oprot.writeI64(self.lockIdInternal) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.lockid is None: + raise TProtocolException(message="Required field lockid is unset!") + if self.dbname is None: + raise TProtocolException(message="Required field dbname is unset!") + if self.state is None: + raise TProtocolException(message="Required field state is unset!") + if self.type is None: + raise TProtocolException(message="Required field type is unset!") + if self.lastheartbeat is None: + 
raise TProtocolException(message="Required field lastheartbeat is unset!") + if self.user is None: + raise TProtocolException(message="Required field user is unset!") + if self.hostname is None: + raise TProtocolException(message="Required field hostname is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ShowLocksResponse: + """ + Attributes: + - locks + + """ + + def __init__( + self, + locks=None, + ): + self.locks = locks + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.locks = [] + (_etype733, _size730) = iprot.readListBegin() + for _i734 in range(_size730): + _elem735 = ShowLocksResponseElement() + _elem735.read(iprot) + self.locks.append(_elem735) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ShowLocksResponse") + if self.locks is not None: + oprot.writeFieldBegin("locks", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.locks)) + for iter736 in self.locks: + iter736.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class HeartbeatRequest: + """ + Attributes: + - lockid + - txnid + + """ + + def __init__( + self, + lockid=None, + txnid=None, + ): + self.lockid = lockid + self.txnid = txnid + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.lockid = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.txnid = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("HeartbeatRequest") + if self.lockid is not None: + oprot.writeFieldBegin("lockid", TType.I64, 1) + oprot.writeI64(self.lockid) + oprot.writeFieldEnd() + if self.txnid 
is not None: + oprot.writeFieldBegin("txnid", TType.I64, 2) + oprot.writeI64(self.txnid) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class HeartbeatTxnRangeRequest: + """ + Attributes: + - min + - max + + """ + + def __init__( + self, + min=None, + max=None, + ): + self.min = min + self.max = max + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.min = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.max = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("HeartbeatTxnRangeRequest") + if self.min is not None: + oprot.writeFieldBegin("min", TType.I64, 1) + oprot.writeI64(self.min) + oprot.writeFieldEnd() + if self.max is not None: + oprot.writeFieldBegin("max", TType.I64, 2) + oprot.writeI64(self.max) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.min is None: + raise TProtocolException(message="Required field min is unset!") + if self.max is None: + raise TProtocolException(message="Required field max is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class HeartbeatTxnRangeResponse: + """ + Attributes: + - aborted + - nosuch + + """ + + def __init__( + self, + aborted=None, + nosuch=None, + ): + self.aborted = aborted + self.nosuch = nosuch + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.SET: + self.aborted = set() + (_etype740, _size737) = iprot.readSetBegin() + for _i741 in range(_size737): + _elem742 = iprot.readI64() + self.aborted.add(_elem742) + iprot.readSetEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.SET: + self.nosuch = set() + (_etype746, _size743) = iprot.readSetBegin() + for _i747 in range(_size743): + _elem748 = iprot.readI64() + self.nosuch.add(_elem748) + iprot.readSetEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + 
iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("HeartbeatTxnRangeResponse") + if self.aborted is not None: + oprot.writeFieldBegin("aborted", TType.SET, 1) + oprot.writeSetBegin(TType.I64, len(self.aborted)) + for iter749 in self.aborted: + oprot.writeI64(iter749) + oprot.writeSetEnd() + oprot.writeFieldEnd() + if self.nosuch is not None: + oprot.writeFieldBegin("nosuch", TType.SET, 2) + oprot.writeSetBegin(TType.I64, len(self.nosuch)) + for iter750 in self.nosuch: + oprot.writeI64(iter750) + oprot.writeSetEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.aborted is None: + raise TProtocolException(message="Required field aborted is unset!") + if self.nosuch is None: + raise TProtocolException(message="Required field nosuch is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CompactionRequest: + """ + Attributes: + - dbname + - tablename + - partitionname + - type + - runas + - properties + - initiatorId + - initiatorVersion + + """ + + def __init__( + self, + dbname=None, + tablename=None, + partitionname=None, + type=None, + runas=None, + properties=None, + initiatorId=None, + initiatorVersion=None, + ): + self.dbname = dbname + self.tablename = tablename + self.partitionname = partitionname + self.type = type + self.runas = runas + self.properties = properties + self.initiatorId = initiatorId + self.initiatorVersion = initiatorVersion + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tablename = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.partitionname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.runas = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.MAP: + self.properties = {} + (_ktype752, _vtype753, _size751) = iprot.readMapBegin() + for _i755 in range(_size751): + _key756 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val757 = ( + iprot.readString().decode("utf-8", 
errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.properties[_key756] = _val757 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.initiatorId = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.initiatorVersion = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CompactionRequest") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tablename is not None: + oprot.writeFieldBegin("tablename", TType.STRING, 2) + oprot.writeString(self.tablename.encode("utf-8") if sys.version_info[0] == 2 else self.tablename) + oprot.writeFieldEnd() + if self.partitionname is not None: + oprot.writeFieldBegin("partitionname", TType.STRING, 3) + oprot.writeString(self.partitionname.encode("utf-8") if sys.version_info[0] == 2 else self.partitionname) + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.I32, 4) + oprot.writeI32(self.type) + oprot.writeFieldEnd() + if self.runas is not None: + oprot.writeFieldBegin("runas", TType.STRING, 5) + oprot.writeString(self.runas.encode("utf-8") if sys.version_info[0] == 2 else self.runas) + oprot.writeFieldEnd() + if self.properties is not None: + oprot.writeFieldBegin("properties", TType.MAP, 6) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) + for kiter758, viter759 in self.properties.items(): + oprot.writeString(kiter758.encode("utf-8") if sys.version_info[0] == 2 else kiter758) + oprot.writeString(viter759.encode("utf-8") if sys.version_info[0] == 2 else viter759) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.initiatorId is not None: + oprot.writeFieldBegin("initiatorId", TType.STRING, 7) + oprot.writeString(self.initiatorId.encode("utf-8") if sys.version_info[0] == 2 else self.initiatorId) + oprot.writeFieldEnd() + if self.initiatorVersion is not None: + oprot.writeFieldBegin("initiatorVersion", TType.STRING, 8) + oprot.writeString(self.initiatorVersion.encode("utf-8") if sys.version_info[0] == 2 else self.initiatorVersion) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbname is None: + raise TProtocolException(message="Required field dbname is unset!") + if self.tablename is None: + raise TProtocolException(message="Required field tablename is unset!") + if self.type is None: + raise TProtocolException(message="Required field type is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CompactionInfoStruct: + """ + Attributes: + - id + - dbname + - tablename + - partitionname + - 
type + - runas + - properties + - toomanyaborts + - state + - workerId + - start + - highestWriteId + - errorMessage + - hasoldabort + - enqueueTime + - retryRetention + + """ + + def __init__( + self, + id=None, + dbname=None, + tablename=None, + partitionname=None, + type=None, + runas=None, + properties=None, + toomanyaborts=None, + state=None, + workerId=None, + start=None, + highestWriteId=None, + errorMessage=None, + hasoldabort=None, + enqueueTime=None, + retryRetention=None, + ): + self.id = id + self.dbname = dbname + self.tablename = tablename + self.partitionname = partitionname + self.type = type + self.runas = runas + self.properties = properties + self.toomanyaborts = toomanyaborts + self.state = state + self.workerId = workerId + self.start = start + self.highestWriteId = highestWriteId + self.errorMessage = errorMessage + self.hasoldabort = hasoldabort + self.enqueueTime = enqueueTime + self.retryRetention = retryRetention + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tablename = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.partitionname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.runas = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.properties = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.toomanyaborts = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.state = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.workerId = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.I64: + self.start = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 12: + if ftype == TType.I64: + self.highestWriteId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 13: + if ftype == TType.STRING: + self.errorMessage = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 14: + if ftype == TType.BOOL: + self.hasoldabort = 
iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 15: + if ftype == TType.I64: + self.enqueueTime = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 16: + if ftype == TType.I64: + self.retryRetention = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CompactionInfoStruct") + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 1) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 2) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tablename is not None: + oprot.writeFieldBegin("tablename", TType.STRING, 3) + oprot.writeString(self.tablename.encode("utf-8") if sys.version_info[0] == 2 else self.tablename) + oprot.writeFieldEnd() + if self.partitionname is not None: + oprot.writeFieldBegin("partitionname", TType.STRING, 4) + oprot.writeString(self.partitionname.encode("utf-8") if sys.version_info[0] == 2 else self.partitionname) + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.I32, 5) + oprot.writeI32(self.type) + oprot.writeFieldEnd() + if self.runas is not None: + oprot.writeFieldBegin("runas", TType.STRING, 6) + oprot.writeString(self.runas.encode("utf-8") if sys.version_info[0] == 2 else self.runas) + oprot.writeFieldEnd() + if self.properties is not None: + oprot.writeFieldBegin("properties", TType.STRING, 7) + oprot.writeString(self.properties.encode("utf-8") if sys.version_info[0] == 2 else self.properties) + oprot.writeFieldEnd() + if self.toomanyaborts is not None: + oprot.writeFieldBegin("toomanyaborts", TType.BOOL, 8) + oprot.writeBool(self.toomanyaborts) + oprot.writeFieldEnd() + if self.state is not None: + oprot.writeFieldBegin("state", TType.STRING, 9) + oprot.writeString(self.state.encode("utf-8") if sys.version_info[0] == 2 else self.state) + oprot.writeFieldEnd() + if self.workerId is not None: + oprot.writeFieldBegin("workerId", TType.STRING, 10) + oprot.writeString(self.workerId.encode("utf-8") if sys.version_info[0] == 2 else self.workerId) + oprot.writeFieldEnd() + if self.start is not None: + oprot.writeFieldBegin("start", TType.I64, 11) + oprot.writeI64(self.start) + oprot.writeFieldEnd() + if self.highestWriteId is not None: + oprot.writeFieldBegin("highestWriteId", TType.I64, 12) + oprot.writeI64(self.highestWriteId) + oprot.writeFieldEnd() + if self.errorMessage is not None: + oprot.writeFieldBegin("errorMessage", TType.STRING, 13) + oprot.writeString(self.errorMessage.encode("utf-8") if sys.version_info[0] == 2 else self.errorMessage) + oprot.writeFieldEnd() + if self.hasoldabort is not None: + oprot.writeFieldBegin("hasoldabort", TType.BOOL, 14) + oprot.writeBool(self.hasoldabort) + oprot.writeFieldEnd() + if self.enqueueTime is not None: + oprot.writeFieldBegin("enqueueTime", TType.I64, 15) + oprot.writeI64(self.enqueueTime) + oprot.writeFieldEnd() + if self.retryRetention is not None: + oprot.writeFieldBegin("retryRetention", TType.I64, 16) + oprot.writeI64(self.retryRetention) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.id is None: + raise TProtocolException(message="Required field id is 
unset!") + if self.dbname is None: + raise TProtocolException(message="Required field dbname is unset!") + if self.tablename is None: + raise TProtocolException(message="Required field tablename is unset!") + if self.type is None: + raise TProtocolException(message="Required field type is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class OptionalCompactionInfoStruct: + """ + Attributes: + - ci + + """ + + def __init__( + self, + ci=None, + ): + self.ci = ci + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.ci = CompactionInfoStruct() + self.ci.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("OptionalCompactionInfoStruct") + if self.ci is not None: + oprot.writeFieldBegin("ci", TType.STRUCT, 1) + self.ci.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CompactionMetricsDataStruct: + """ + Attributes: + - dbname + - tblname + - partitionname + - type + - metricvalue + - version + - threshold + + """ + + def __init__( + self, + dbname=None, + tblname=None, + partitionname=None, + type=None, + metricvalue=None, + version=None, + threshold=None, + ): + self.dbname = dbname + self.tblname = tblname + self.partitionname = partitionname + self.type = type + self.metricvalue = metricvalue + self.version = version + self.threshold = threshold + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.partitionname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else 
iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.metricvalue = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: + self.version = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.threshold = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CompactionMetricsDataStruct") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tblname is not None: + oprot.writeFieldBegin("tblname", TType.STRING, 2) + oprot.writeString(self.tblname.encode("utf-8") if sys.version_info[0] == 2 else self.tblname) + oprot.writeFieldEnd() + if self.partitionname is not None: + oprot.writeFieldBegin("partitionname", TType.STRING, 3) + oprot.writeString(self.partitionname.encode("utf-8") if sys.version_info[0] == 2 else self.partitionname) + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.I32, 4) + oprot.writeI32(self.type) + oprot.writeFieldEnd() + if self.metricvalue is not None: + oprot.writeFieldBegin("metricvalue", TType.I32, 5) + oprot.writeI32(self.metricvalue) + oprot.writeFieldEnd() + if self.version is not None: + oprot.writeFieldBegin("version", TType.I32, 6) + oprot.writeI32(self.version) + oprot.writeFieldEnd() + if self.threshold is not None: + oprot.writeFieldBegin("threshold", TType.I32, 7) + oprot.writeI32(self.threshold) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbname is None: + raise TProtocolException(message="Required field dbname is unset!") + if self.tblname is None: + raise TProtocolException(message="Required field tblname is unset!") + if self.type is None: + raise TProtocolException(message="Required field type is unset!") + if self.metricvalue is None: + raise TProtocolException(message="Required field metricvalue is unset!") + if self.version is None: + raise TProtocolException(message="Required field version is unset!") + if self.threshold is None: + raise TProtocolException(message="Required field threshold is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CompactionMetricsDataResponse: + """ + Attributes: + - data + + """ + + def __init__( + self, + data=None, + ): + self.data = data + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.data = 
CompactionMetricsDataStruct() + self.data.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CompactionMetricsDataResponse") + if self.data is not None: + oprot.writeFieldBegin("data", TType.STRUCT, 1) + self.data.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CompactionMetricsDataRequest: + """ + Attributes: + - dbName + - tblName + - partitionName + - type + + """ + + def __init__( + self, + dbName=None, + tblName=None, + partitionName=None, + type=None, + ): + self.dbName = dbName + self.tblName = tblName + self.partitionName = partitionName + self.type = type + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.partitionName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.type = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CompactionMetricsDataRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 2) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.partitionName is not None: + oprot.writeFieldBegin("partitionName", TType.STRING, 3) + oprot.writeString(self.partitionName.encode("utf-8") if sys.version_info[0] == 2 else self.partitionName) + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.I32, 4) + oprot.writeI32(self.type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise 
TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + if self.type is None: + raise TProtocolException(message="Required field type is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CompactionResponse: + """ + Attributes: + - id + - state + - accepted + - errormessage + + """ + + def __init__( + self, + id=None, + state=None, + accepted=None, + errormessage=None, + ): + self.id = id + self.state = state + self.accepted = accepted + self.errormessage = errormessage + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.state = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.accepted = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.errormessage = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CompactionResponse") + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 1) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + if self.state is not None: + oprot.writeFieldBegin("state", TType.STRING, 2) + oprot.writeString(self.state.encode("utf-8") if sys.version_info[0] == 2 else self.state) + oprot.writeFieldEnd() + if self.accepted is not None: + oprot.writeFieldBegin("accepted", TType.BOOL, 3) + oprot.writeBool(self.accepted) + oprot.writeFieldEnd() + if self.errormessage is not None: + oprot.writeFieldBegin("errormessage", TType.STRING, 4) + oprot.writeString(self.errormessage.encode("utf-8") if sys.version_info[0] == 2 else self.errormessage) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.id is None: + raise TProtocolException(message="Required field id is unset!") + if self.state is None: + raise TProtocolException(message="Required field state is unset!") + if self.accepted is None: + raise TProtocolException(message="Required field accepted is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) 
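+
+# Usage sketch (illustrative, not part of the generated bindings): assuming the
+# full module is importable as ``hive_metastore.ttypes`` and the ``thrift``
+# runtime is installed, a CompactionRequest can be round-tripped through
+# TBinaryProtocol over an in-memory transport. The CompactionType value ``2``
+# (MAJOR in the Hive IDL) and the db/table names are assumptions for the example.
+#
+#     from thrift.protocol import TBinaryProtocol
+#     from thrift.transport import TTransport
+#
+#     req = CompactionRequest(dbname="default", tablename="events", type=2)
+#     req.validate()  # raises TProtocolException if dbname/tablename/type is unset
+#
+#     buf = TTransport.TMemoryBuffer()
+#     req.write(TBinaryProtocol.TBinaryProtocol(buf))  # binary-encode into the buffer
+#
+#     decoded = CompactionRequest()
+#     decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
+#     assert decoded == req  # __eq__ above compares the instances' __dict__s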
+ + +class ShowCompactRequest: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ShowCompactRequest") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ShowCompactResponseElement: + """ + Attributes: + - dbname + - tablename + - partitionname + - type + - state + - workerid + - start + - runAs + - hightestTxnId + - metaInfo + - endTime + - hadoopJobId + - id + - errorMessage + - enqueueTime + - workerVersion + - initiatorId + - initiatorVersion + - cleanerStart + + """ + + def __init__( + self, + dbname=None, + tablename=None, + partitionname=None, + type=None, + state=None, + workerid=None, + start=None, + runAs=None, + hightestTxnId=None, + metaInfo=None, + endTime=None, + hadoopJobId="None", + id=None, + errorMessage=None, + enqueueTime=None, + workerVersion=None, + initiatorId=None, + initiatorVersion=None, + cleanerStart=None, + ): + self.dbname = dbname + self.tablename = tablename + self.partitionname = partitionname + self.type = type + self.state = state + self.workerid = workerid + self.start = start + self.runAs = runAs + self.hightestTxnId = hightestTxnId + self.metaInfo = metaInfo + self.endTime = endTime + self.hadoopJobId = hadoopJobId + self.id = id + self.errorMessage = errorMessage + self.enqueueTime = enqueueTime + self.workerVersion = workerVersion + self.initiatorId = initiatorId + self.initiatorVersion = initiatorVersion + self.cleanerStart = cleanerStart + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tablename = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.partitionname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.state = ( + 
iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.workerid = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I64: + self.start = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.runAs = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.I64: + self.hightestTxnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.metaInfo = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.I64: + self.endTime = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 12: + if ftype == TType.STRING: + self.hadoopJobId = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 13: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 14: + if ftype == TType.STRING: + self.errorMessage = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 15: + if ftype == TType.I64: + self.enqueueTime = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 16: + if ftype == TType.STRING: + self.workerVersion = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 17: + if ftype == TType.STRING: + self.initiatorId = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 18: + if ftype == TType.STRING: + self.initiatorVersion = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 19: + if ftype == TType.I64: + self.cleanerStart = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ShowCompactResponseElement") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tablename is not None: + oprot.writeFieldBegin("tablename", TType.STRING, 2) + oprot.writeString(self.tablename.encode("utf-8") if sys.version_info[0] == 2 else self.tablename) + oprot.writeFieldEnd() + if self.partitionname is not None: + oprot.writeFieldBegin("partitionname", TType.STRING, 3) + oprot.writeString(self.partitionname.encode("utf-8") if sys.version_info[0] == 2 else self.partitionname) + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.I32, 4) + oprot.writeI32(self.type) + oprot.writeFieldEnd() + if self.state is not None: + 
oprot.writeFieldBegin("state", TType.STRING, 5) + oprot.writeString(self.state.encode("utf-8") if sys.version_info[0] == 2 else self.state) + oprot.writeFieldEnd() + if self.workerid is not None: + oprot.writeFieldBegin("workerid", TType.STRING, 6) + oprot.writeString(self.workerid.encode("utf-8") if sys.version_info[0] == 2 else self.workerid) + oprot.writeFieldEnd() + if self.start is not None: + oprot.writeFieldBegin("start", TType.I64, 7) + oprot.writeI64(self.start) + oprot.writeFieldEnd() + if self.runAs is not None: + oprot.writeFieldBegin("runAs", TType.STRING, 8) + oprot.writeString(self.runAs.encode("utf-8") if sys.version_info[0] == 2 else self.runAs) + oprot.writeFieldEnd() + if self.hightestTxnId is not None: + oprot.writeFieldBegin("hightestTxnId", TType.I64, 9) + oprot.writeI64(self.hightestTxnId) + oprot.writeFieldEnd() + if self.metaInfo is not None: + oprot.writeFieldBegin("metaInfo", TType.STRING, 10) + oprot.writeString(self.metaInfo.encode("utf-8") if sys.version_info[0] == 2 else self.metaInfo) + oprot.writeFieldEnd() + if self.endTime is not None: + oprot.writeFieldBegin("endTime", TType.I64, 11) + oprot.writeI64(self.endTime) + oprot.writeFieldEnd() + if self.hadoopJobId is not None: + oprot.writeFieldBegin("hadoopJobId", TType.STRING, 12) + oprot.writeString(self.hadoopJobId.encode("utf-8") if sys.version_info[0] == 2 else self.hadoopJobId) + oprot.writeFieldEnd() + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 13) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + if self.errorMessage is not None: + oprot.writeFieldBegin("errorMessage", TType.STRING, 14) + oprot.writeString(self.errorMessage.encode("utf-8") if sys.version_info[0] == 2 else self.errorMessage) + oprot.writeFieldEnd() + if self.enqueueTime is not None: + oprot.writeFieldBegin("enqueueTime", TType.I64, 15) + oprot.writeI64(self.enqueueTime) + oprot.writeFieldEnd() + if self.workerVersion is not None: + oprot.writeFieldBegin("workerVersion", TType.STRING, 16) + oprot.writeString(self.workerVersion.encode("utf-8") if sys.version_info[0] == 2 else self.workerVersion) + oprot.writeFieldEnd() + if self.initiatorId is not None: + oprot.writeFieldBegin("initiatorId", TType.STRING, 17) + oprot.writeString(self.initiatorId.encode("utf-8") if sys.version_info[0] == 2 else self.initiatorId) + oprot.writeFieldEnd() + if self.initiatorVersion is not None: + oprot.writeFieldBegin("initiatorVersion", TType.STRING, 18) + oprot.writeString(self.initiatorVersion.encode("utf-8") if sys.version_info[0] == 2 else self.initiatorVersion) + oprot.writeFieldEnd() + if self.cleanerStart is not None: + oprot.writeFieldBegin("cleanerStart", TType.I64, 19) + oprot.writeI64(self.cleanerStart) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbname is None: + raise TProtocolException(message="Required field dbname is unset!") + if self.tablename is None: + raise TProtocolException(message="Required field tablename is unset!") + if self.type is None: + raise TProtocolException(message="Required field type is unset!") + if self.state is None: + raise TProtocolException(message="Required field state is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class 
ShowCompactResponse: + """ + Attributes: + - compacts + + """ + + def __init__( + self, + compacts=None, + ): + self.compacts = compacts + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.compacts = [] + (_etype763, _size760) = iprot.readListBegin() + for _i764 in range(_size760): + _elem765 = ShowCompactResponseElement() + _elem765.read(iprot) + self.compacts.append(_elem765) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ShowCompactResponse") + if self.compacts is not None: + oprot.writeFieldBegin("compacts", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.compacts)) + for iter766 in self.compacts: + iter766.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.compacts is None: + raise TProtocolException(message="Required field compacts is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetLatestCommittedCompactionInfoRequest: + """ + Attributes: + - dbname + - tablename + - partitionnames + - lastCompactionId + + """ + + def __init__( + self, + dbname=None, + tablename=None, + partitionnames=None, + lastCompactionId=None, + ): + self.dbname = dbname + self.tablename = tablename + self.partitionnames = partitionnames + self.lastCompactionId = lastCompactionId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tablename = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.partitionnames = [] + (_etype770, _size767) = iprot.readListBegin() + for _i771 in range(_size767): + _elem772 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partitionnames.append(_elem772) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.lastCompactionId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + 
iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetLatestCommittedCompactionInfoRequest") + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 1) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tablename is not None: + oprot.writeFieldBegin("tablename", TType.STRING, 2) + oprot.writeString(self.tablename.encode("utf-8") if sys.version_info[0] == 2 else self.tablename) + oprot.writeFieldEnd() + if self.partitionnames is not None: + oprot.writeFieldBegin("partitionnames", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.partitionnames)) + for iter773 in self.partitionnames: + oprot.writeString(iter773.encode("utf-8") if sys.version_info[0] == 2 else iter773) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.lastCompactionId is not None: + oprot.writeFieldBegin("lastCompactionId", TType.I64, 4) + oprot.writeI64(self.lastCompactionId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbname is None: + raise TProtocolException(message="Required field dbname is unset!") + if self.tablename is None: + raise TProtocolException(message="Required field tablename is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetLatestCommittedCompactionInfoResponse: + """ + Attributes: + - compactions + + """ + + def __init__( + self, + compactions=None, + ): + self.compactions = compactions + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.compactions = [] + (_etype777, _size774) = iprot.readListBegin() + for _i778 in range(_size774): + _elem779 = CompactionInfoStruct() + _elem779.read(iprot) + self.compactions.append(_elem779) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetLatestCommittedCompactionInfoResponse") + if self.compactions is not None: + oprot.writeFieldBegin("compactions", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.compactions)) + for iter780 in self.compactions: + iter780.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.compactions is None: + raise TProtocolException(message="Required field compactions is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return 
"{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class FindNextCompactRequest: + """ + Attributes: + - workerId + - workerVersion + + """ + + def __init__( + self, + workerId=None, + workerVersion=None, + ): + self.workerId = workerId + self.workerVersion = workerVersion + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.workerId = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.workerVersion = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("FindNextCompactRequest") + if self.workerId is not None: + oprot.writeFieldBegin("workerId", TType.STRING, 1) + oprot.writeString(self.workerId.encode("utf-8") if sys.version_info[0] == 2 else self.workerId) + oprot.writeFieldEnd() + if self.workerVersion is not None: + oprot.writeFieldBegin("workerVersion", TType.STRING, 2) + oprot.writeString(self.workerVersion.encode("utf-8") if sys.version_info[0] == 2 else self.workerVersion) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AddDynamicPartitions: + """ + Attributes: + - txnid + - writeid + - dbname + - tablename + - partitionnames + - operationType + + """ + + def __init__( + self, + txnid=None, + writeid=None, + dbname=None, + tablename=None, + partitionnames=None, + operationType=5, + ): + self.txnid = txnid + self.writeid = writeid + self.dbname = dbname + self.tablename = tablename + self.partitionnames = partitionnames + self.operationType = operationType + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.txnid = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.writeid = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if 
sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.tablename = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.partitionnames = [] + (_etype784, _size781) = iprot.readListBegin() + for _i785 in range(_size781): + _elem786 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partitionnames.append(_elem786) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: + self.operationType = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AddDynamicPartitions") + if self.txnid is not None: + oprot.writeFieldBegin("txnid", TType.I64, 1) + oprot.writeI64(self.txnid) + oprot.writeFieldEnd() + if self.writeid is not None: + oprot.writeFieldBegin("writeid", TType.I64, 2) + oprot.writeI64(self.writeid) + oprot.writeFieldEnd() + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 3) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tablename is not None: + oprot.writeFieldBegin("tablename", TType.STRING, 4) + oprot.writeString(self.tablename.encode("utf-8") if sys.version_info[0] == 2 else self.tablename) + oprot.writeFieldEnd() + if self.partitionnames is not None: + oprot.writeFieldBegin("partitionnames", TType.LIST, 5) + oprot.writeListBegin(TType.STRING, len(self.partitionnames)) + for iter787 in self.partitionnames: + oprot.writeString(iter787.encode("utf-8") if sys.version_info[0] == 2 else iter787) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.operationType is not None: + oprot.writeFieldBegin("operationType", TType.I32, 6) + oprot.writeI32(self.operationType) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnid is None: + raise TProtocolException(message="Required field txnid is unset!") + if self.writeid is None: + raise TProtocolException(message="Required field writeid is unset!") + if self.dbname is None: + raise TProtocolException(message="Required field dbname is unset!") + if self.tablename is None: + raise TProtocolException(message="Required field tablename is unset!") + if self.partitionnames is None: + raise TProtocolException(message="Required field partitionnames is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class BasicTxnInfo: + """ + Attributes: + - isnull + - time + - txnid + - dbname + - tablename + - partitionname + + """ + + def __init__( + self, + isnull=None, + time=None, + txnid=None, + dbname=None, + tablename=None, + partitionname=None, + ): + self.isnull = isnull + self.time = time + self.txnid = txnid + self.dbname = dbname + self.tablename = tablename + self.partitionname = partitionname 
+ + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.BOOL: + self.isnull = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.time = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.txnid = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.dbname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.tablename = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.partitionname = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("BasicTxnInfo") + if self.isnull is not None: + oprot.writeFieldBegin("isnull", TType.BOOL, 1) + oprot.writeBool(self.isnull) + oprot.writeFieldEnd() + if self.time is not None: + oprot.writeFieldBegin("time", TType.I64, 2) + oprot.writeI64(self.time) + oprot.writeFieldEnd() + if self.txnid is not None: + oprot.writeFieldBegin("txnid", TType.I64, 3) + oprot.writeI64(self.txnid) + oprot.writeFieldEnd() + if self.dbname is not None: + oprot.writeFieldBegin("dbname", TType.STRING, 4) + oprot.writeString(self.dbname.encode("utf-8") if sys.version_info[0] == 2 else self.dbname) + oprot.writeFieldEnd() + if self.tablename is not None: + oprot.writeFieldBegin("tablename", TType.STRING, 5) + oprot.writeString(self.tablename.encode("utf-8") if sys.version_info[0] == 2 else self.tablename) + oprot.writeFieldEnd() + if self.partitionname is not None: + oprot.writeFieldBegin("partitionname", TType.STRING, 6) + oprot.writeString(self.partitionname.encode("utf-8") if sys.version_info[0] == 2 else self.partitionname) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.isnull is None: + raise TProtocolException(message="Required field isnull is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class NotificationEventRequest: + """ + Attributes: + - lastEvent + - maxEvents + - eventTypeSkipList + + """ + + def __init__( + self, + lastEvent=None, + maxEvents=None, + eventTypeSkipList=None, + ): + self.lastEvent = lastEvent + self.maxEvents = maxEvents + self.eventTypeSkipList = eventTypeSkipList + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, 
TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.lastEvent = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.maxEvents = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.eventTypeSkipList = [] + (_etype791, _size788) = iprot.readListBegin() + for _i792 in range(_size788): + _elem793 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.eventTypeSkipList.append(_elem793) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("NotificationEventRequest") + if self.lastEvent is not None: + oprot.writeFieldBegin("lastEvent", TType.I64, 1) + oprot.writeI64(self.lastEvent) + oprot.writeFieldEnd() + if self.maxEvents is not None: + oprot.writeFieldBegin("maxEvents", TType.I32, 2) + oprot.writeI32(self.maxEvents) + oprot.writeFieldEnd() + if self.eventTypeSkipList is not None: + oprot.writeFieldBegin("eventTypeSkipList", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.eventTypeSkipList)) + for iter794 in self.eventTypeSkipList: + oprot.writeString(iter794.encode("utf-8") if sys.version_info[0] == 2 else iter794) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.lastEvent is None: + raise TProtocolException(message="Required field lastEvent is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class NotificationEvent: + """ + Attributes: + - eventId + - eventTime + - eventType + - dbName + - tableName + - message + - messageFormat + - catName + + """ + + def __init__( + self, + eventId=None, + eventTime=None, + eventType=None, + dbName=None, + tableName=None, + message=None, + messageFormat=None, + catName=None, + ): + self.eventId = eventId + self.eventTime = eventTime + self.eventType = eventType + self.dbName = dbName + self.tableName = tableName + self.message = message + self.messageFormat = messageFormat + self.catName = catName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.eventId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.eventTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.eventType = ( + iprot.readString().decode("utf-8", 
errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.messageFormat = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("NotificationEvent") + if self.eventId is not None: + oprot.writeFieldBegin("eventId", TType.I64, 1) + oprot.writeI64(self.eventId) + oprot.writeFieldEnd() + if self.eventTime is not None: + oprot.writeFieldBegin("eventTime", TType.I32, 2) + oprot.writeI32(self.eventTime) + oprot.writeFieldEnd() + if self.eventType is not None: + oprot.writeFieldBegin("eventType", TType.STRING, 3) + oprot.writeString(self.eventType.encode("utf-8") if sys.version_info[0] == 2 else self.eventType) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 4) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 5) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 6) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + if self.messageFormat is not None: + oprot.writeFieldBegin("messageFormat", TType.STRING, 7) + oprot.writeString(self.messageFormat.encode("utf-8") if sys.version_info[0] == 2 else self.messageFormat) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 8) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.eventId is None: + raise TProtocolException(message="Required field eventId is unset!") + if self.eventTime is None: + raise TProtocolException(message="Required field eventTime is unset!") + if self.eventType is None: + raise TProtocolException(message="Required field eventType is unset!") + if self.message is None: + raise TProtocolException(message="Required field message is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + 
return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class NotificationEventResponse: + """ + Attributes: + - events + + """ + + def __init__( + self, + events=None, + ): + self.events = events + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.events = [] + (_etype798, _size795) = iprot.readListBegin() + for _i799 in range(_size795): + _elem800 = NotificationEvent() + _elem800.read(iprot) + self.events.append(_elem800) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("NotificationEventResponse") + if self.events is not None: + oprot.writeFieldBegin("events", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.events)) + for iter801 in self.events: + iter801.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.events is None: + raise TProtocolException(message="Required field events is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CurrentNotificationEventId: + """ + Attributes: + - eventId + + """ + + def __init__( + self, + eventId=None, + ): + self.eventId = eventId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.eventId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CurrentNotificationEventId") + if self.eventId is not None: + oprot.writeFieldBegin("eventId", TType.I64, 1) + oprot.writeI64(self.eventId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.eventId is None: + raise TProtocolException(message="Required field eventId is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return 
isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class NotificationEventsCountRequest: + """ + Attributes: + - fromEventId + - dbName + - catName + - toEventId + - limit + + """ + + def __init__( + self, + fromEventId=None, + dbName=None, + catName=None, + toEventId=None, + limit=None, + ): + self.fromEventId = fromEventId + self.dbName = dbName + self.catName = catName + self.toEventId = toEventId + self.limit = limit + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.fromEventId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.toEventId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I64: + self.limit = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("NotificationEventsCountRequest") + if self.fromEventId is not None: + oprot.writeFieldBegin("fromEventId", TType.I64, 1) + oprot.writeI64(self.fromEventId) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 3) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.toEventId is not None: + oprot.writeFieldBegin("toEventId", TType.I64, 4) + oprot.writeI64(self.toEventId) + oprot.writeFieldEnd() + if self.limit is not None: + oprot.writeFieldBegin("limit", TType.I64, 5) + oprot.writeI64(self.limit) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fromEventId is None: + raise TProtocolException(message="Required field fromEventId is unset!") + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class NotificationEventsCountResponse: + """ + Attributes: + - eventsCount + + """ + + def __init__( + self, + eventsCount=None, + ): + self.eventsCount = eventsCount + + def read(self, iprot): + if ( 
+ iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.eventsCount = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("NotificationEventsCountResponse") + if self.eventsCount is not None: + oprot.writeFieldBegin("eventsCount", TType.I64, 1) + oprot.writeI64(self.eventsCount) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.eventsCount is None: + raise TProtocolException(message="Required field eventsCount is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class InsertEventRequestData: + """ + Attributes: + - replace + - filesAdded + - filesAddedChecksum + - subDirectoryList + - partitionVal + + """ + + def __init__( + self, + replace=None, + filesAdded=None, + filesAddedChecksum=None, + subDirectoryList=None, + partitionVal=None, + ): + self.replace = replace + self.filesAdded = filesAdded + self.filesAddedChecksum = filesAddedChecksum + self.subDirectoryList = subDirectoryList + self.partitionVal = partitionVal + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.BOOL: + self.replace = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.filesAdded = [] + (_etype805, _size802) = iprot.readListBegin() + for _i806 in range(_size802): + _elem807 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.filesAdded.append(_elem807) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.filesAddedChecksum = [] + (_etype811, _size808) = iprot.readListBegin() + for _i812 in range(_size808): + _elem813 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.filesAddedChecksum.append(_elem813) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.subDirectoryList = [] + (_etype817, _size814) = iprot.readListBegin() + for _i818 in range(_size814): + _elem819 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.subDirectoryList.append(_elem819) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.partitionVal = [] + 
(_etype823, _size820) = iprot.readListBegin() + for _i824 in range(_size820): + _elem825 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partitionVal.append(_elem825) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("InsertEventRequestData") + if self.replace is not None: + oprot.writeFieldBegin("replace", TType.BOOL, 1) + oprot.writeBool(self.replace) + oprot.writeFieldEnd() + if self.filesAdded is not None: + oprot.writeFieldBegin("filesAdded", TType.LIST, 2) + oprot.writeListBegin(TType.STRING, len(self.filesAdded)) + for iter826 in self.filesAdded: + oprot.writeString(iter826.encode("utf-8") if sys.version_info[0] == 2 else iter826) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.filesAddedChecksum is not None: + oprot.writeFieldBegin("filesAddedChecksum", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.filesAddedChecksum)) + for iter827 in self.filesAddedChecksum: + oprot.writeString(iter827.encode("utf-8") if sys.version_info[0] == 2 else iter827) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.subDirectoryList is not None: + oprot.writeFieldBegin("subDirectoryList", TType.LIST, 4) + oprot.writeListBegin(TType.STRING, len(self.subDirectoryList)) + for iter828 in self.subDirectoryList: + oprot.writeString(iter828.encode("utf-8") if sys.version_info[0] == 2 else iter828) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.partitionVal is not None: + oprot.writeFieldBegin("partitionVal", TType.LIST, 5) + oprot.writeListBegin(TType.STRING, len(self.partitionVal)) + for iter829 in self.partitionVal: + oprot.writeString(iter829.encode("utf-8") if sys.version_info[0] == 2 else iter829) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.filesAdded is None: + raise TProtocolException(message="Required field filesAdded is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class FireEventRequestData: + """ + Attributes: + - insertData + - insertDatas + + """ + + def __init__( + self, + insertData=None, + insertDatas=None, + ): + self.insertData = insertData + self.insertDatas = insertDatas + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.insertData = InsertEventRequestData() + self.insertData.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.insertDatas = [] + (_etype833, _size830) = iprot.readListBegin() + for _i834 in range(_size830): + _elem835 = InsertEventRequestData() + _elem835.read(iprot) + 
self.insertDatas.append(_elem835) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("FireEventRequestData") + if self.insertData is not None: + oprot.writeFieldBegin("insertData", TType.STRUCT, 1) + self.insertData.write(oprot) + oprot.writeFieldEnd() + if self.insertDatas is not None: + oprot.writeFieldBegin("insertDatas", TType.LIST, 2) + oprot.writeListBegin(TType.STRUCT, len(self.insertDatas)) + for iter836 in self.insertDatas: + iter836.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class FireEventRequest: + """ + Attributes: + - successful + - data + - dbName + - tableName + - partitionVals + - catName + + """ + + def __init__( + self, + successful=None, + data=None, + dbName=None, + tableName=None, + partitionVals=None, + catName=None, + ): + self.successful = successful + self.data = data + self.dbName = dbName + self.tableName = tableName + self.partitionVals = partitionVals + self.catName = catName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.BOOL: + self.successful = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.data = FireEventRequestData() + self.data.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.partitionVals = [] + (_etype840, _size837) = iprot.readListBegin() + for _i841 in range(_size837): + _elem842 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partitionVals.append(_elem842) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("FireEventRequest") + if 
self.successful is not None: + oprot.writeFieldBegin("successful", TType.BOOL, 1) + oprot.writeBool(self.successful) + oprot.writeFieldEnd() + if self.data is not None: + oprot.writeFieldBegin("data", TType.STRUCT, 2) + self.data.write(oprot) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 3) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 4) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.partitionVals is not None: + oprot.writeFieldBegin("partitionVals", TType.LIST, 5) + oprot.writeListBegin(TType.STRING, len(self.partitionVals)) + for iter843 in self.partitionVals: + oprot.writeString(iter843.encode("utf-8") if sys.version_info[0] == 2 else iter843) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 6) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.successful is None: + raise TProtocolException(message="Required field successful is unset!") + if self.data is None: + raise TProtocolException(message="Required field data is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class FireEventResponse: + """ + Attributes: + - eventIds + + """ + + def __init__( + self, + eventIds=None, + ): + self.eventIds = eventIds + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.eventIds = [] + (_etype847, _size844) = iprot.readListBegin() + for _i848 in range(_size844): + _elem849 = iprot.readI64() + self.eventIds.append(_elem849) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("FireEventResponse") + if self.eventIds is not None: + oprot.writeFieldBegin("eventIds", TType.LIST, 1) + oprot.writeListBegin(TType.I64, len(self.eventIds)) + for iter850 in self.eventIds: + oprot.writeI64(iter850) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + 
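+# A hand-written sketch (not Thrift-compiler output) of composing the event
+# structs above into a listener notification. The `client` object is an
+# assumed ThriftHiveMetastore.Client connected to a running metastore, and the
+# final call is illustrative only. Kept commented out to leave the vendored
+# module inert.
+#
+#     insert_data = InsertEventRequestData(
+#         replace=False,
+#         filesAdded=["s3://bucket/warehouse/events/part=1/data-0001.parquet"],
+#     )
+#     request = FireEventRequest(
+#         successful=True,
+#         data=FireEventRequestData(insertData=insert_data),
+#         dbName="default",
+#         tableName="events",
+#         partitionVals=["1"],
+#     )
+#     request.validate()  # only `successful` and `data` are required here
+#     # response = client.fire_listener_event(request)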
+class WriteNotificationLogRequest: + """ + Attributes: + - txnId + - writeId + - db + - table + - fileInfo + - partitionVals + + """ + + def __init__( + self, + txnId=None, + writeId=None, + db=None, + table=None, + fileInfo=None, + partitionVals=None, + ): + self.txnId = txnId + self.writeId = writeId + self.db = db + self.table = table + self.fileInfo = fileInfo + self.partitionVals = partitionVals + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.db = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.table = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.fileInfo = InsertEventRequestData() + self.fileInfo.read(iprot) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.partitionVals = [] + (_etype854, _size851) = iprot.readListBegin() + for _i855 in range(_size851): + _elem856 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partitionVals.append(_elem856) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WriteNotificationLogRequest") + if self.txnId is not None: + oprot.writeFieldBegin("txnId", TType.I64, 1) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin("writeId", TType.I64, 2) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.db is not None: + oprot.writeFieldBegin("db", TType.STRING, 3) + oprot.writeString(self.db.encode("utf-8") if sys.version_info[0] == 2 else self.db) + oprot.writeFieldEnd() + if self.table is not None: + oprot.writeFieldBegin("table", TType.STRING, 4) + oprot.writeString(self.table.encode("utf-8") if sys.version_info[0] == 2 else self.table) + oprot.writeFieldEnd() + if self.fileInfo is not None: + oprot.writeFieldBegin("fileInfo", TType.STRUCT, 5) + self.fileInfo.write(oprot) + oprot.writeFieldEnd() + if self.partitionVals is not None: + oprot.writeFieldBegin("partitionVals", TType.LIST, 6) + oprot.writeListBegin(TType.STRING, len(self.partitionVals)) + for iter857 in self.partitionVals: + oprot.writeString(iter857.encode("utf-8") if sys.version_info[0] == 2 else iter857) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnId is None: + raise TProtocolException(message="Required field txnId is unset!") + if self.writeId is None: + raise TProtocolException(message="Required 
field writeId is unset!") + if self.db is None: + raise TProtocolException(message="Required field db is unset!") + if self.table is None: + raise TProtocolException(message="Required field table is unset!") + if self.fileInfo is None: + raise TProtocolException(message="Required field fileInfo is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WriteNotificationLogResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WriteNotificationLogResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WriteNotificationLogBatchRequest: + """ + Attributes: + - catalog + - db + - table + - requestList + + """ + + def __init__( + self, + catalog=None, + db=None, + table=None, + requestList=None, + ): + self.catalog = catalog + self.db = db + self.table = table + self.requestList = requestList + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catalog = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.db = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.table = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.requestList = [] + (_etype861, _size858) = iprot.readListBegin() + for _i862 in range(_size858): + _elem863 = WriteNotificationLogRequest() + _elem863.read(iprot) + self.requestList.append(_elem863) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + 
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WriteNotificationLogBatchRequest") + if self.catalog is not None: + oprot.writeFieldBegin("catalog", TType.STRING, 1) + oprot.writeString(self.catalog.encode("utf-8") if sys.version_info[0] == 2 else self.catalog) + oprot.writeFieldEnd() + if self.db is not None: + oprot.writeFieldBegin("db", TType.STRING, 2) + oprot.writeString(self.db.encode("utf-8") if sys.version_info[0] == 2 else self.db) + oprot.writeFieldEnd() + if self.table is not None: + oprot.writeFieldBegin("table", TType.STRING, 3) + oprot.writeString(self.table.encode("utf-8") if sys.version_info[0] == 2 else self.table) + oprot.writeFieldEnd() + if self.requestList is not None: + oprot.writeFieldBegin("requestList", TType.LIST, 4) + oprot.writeListBegin(TType.STRUCT, len(self.requestList)) + for iter864 in self.requestList: + iter864.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.catalog is None: + raise TProtocolException(message="Required field catalog is unset!") + if self.db is None: + raise TProtocolException(message="Required field db is unset!") + if self.table is None: + raise TProtocolException(message="Required field table is unset!") + if self.requestList is None: + raise TProtocolException(message="Required field requestList is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WriteNotificationLogBatchResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WriteNotificationLogBatchResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class MetadataPpdResult: + """ + Attributes: + - metadata + - includeBitset + + """ + + def __init__( + self, + metadata=None, + includeBitset=None, + ): + self.metadata = metadata + self.includeBitset = includeBitset + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype 
== TType.STRING: + self.metadata = iprot.readBinary() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.includeBitset = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("MetadataPpdResult") + if self.metadata is not None: + oprot.writeFieldBegin("metadata", TType.STRING, 1) + oprot.writeBinary(self.metadata) + oprot.writeFieldEnd() + if self.includeBitset is not None: + oprot.writeFieldBegin("includeBitset", TType.STRING, 2) + oprot.writeBinary(self.includeBitset) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetFileMetadataByExprResult: + """ + Attributes: + - metadata + - isSupported + + """ + + def __init__( + self, + metadata=None, + isSupported=None, + ): + self.metadata = metadata + self.isSupported = isSupported + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.MAP: + self.metadata = {} + (_ktype866, _vtype867, _size865) = iprot.readMapBegin() + for _i869 in range(_size865): + _key870 = iprot.readI64() + _val871 = MetadataPpdResult() + _val871.read(iprot) + self.metadata[_key870] = _val871 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isSupported = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetFileMetadataByExprResult") + if self.metadata is not None: + oprot.writeFieldBegin("metadata", TType.MAP, 1) + oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata)) + for kiter872, viter873 in self.metadata.items(): + oprot.writeI64(kiter872) + viter873.write(oprot) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.isSupported is not None: + oprot.writeFieldBegin("isSupported", TType.BOOL, 2) + oprot.writeBool(self.isSupported) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.metadata is None: + raise TProtocolException(message="Required field metadata is unset!") + if self.isSupported is None: + raise TProtocolException(message="Required field isSupported is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ 
== other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetFileMetadataByExprRequest: + """ + Attributes: + - fileIds + - expr + - doGetFooters + - type + + """ + + def __init__( + self, + fileIds=None, + expr=None, + doGetFooters=None, + type=None, + ): + self.fileIds = fileIds + self.expr = expr + self.doGetFooters = doGetFooters + self.type = type + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fileIds = [] + (_etype877, _size874) = iprot.readListBegin() + for _i878 in range(_size874): + _elem879 = iprot.readI64() + self.fileIds.append(_elem879) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.expr = iprot.readBinary() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.doGetFooters = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.type = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetFileMetadataByExprRequest") + if self.fileIds is not None: + oprot.writeFieldBegin("fileIds", TType.LIST, 1) + oprot.writeListBegin(TType.I64, len(self.fileIds)) + for iter880 in self.fileIds: + oprot.writeI64(iter880) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.expr is not None: + oprot.writeFieldBegin("expr", TType.STRING, 2) + oprot.writeBinary(self.expr) + oprot.writeFieldEnd() + if self.doGetFooters is not None: + oprot.writeFieldBegin("doGetFooters", TType.BOOL, 3) + oprot.writeBool(self.doGetFooters) + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.I32, 4) + oprot.writeI32(self.type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fileIds is None: + raise TProtocolException(message="Required field fileIds is unset!") + if self.expr is None: + raise TProtocolException(message="Required field expr is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetFileMetadataResult: + """ + Attributes: + - metadata + - isSupported + + """ + + def __init__( + self, + metadata=None, + isSupported=None, + ): + self.metadata = metadata + self.isSupported = isSupported + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.MAP: + self.metadata = {} + (_ktype882, 
_vtype883, _size881) = iprot.readMapBegin() + for _i885 in range(_size881): + _key886 = iprot.readI64() + _val887 = iprot.readBinary() + self.metadata[_key886] = _val887 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isSupported = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetFileMetadataResult") + if self.metadata is not None: + oprot.writeFieldBegin("metadata", TType.MAP, 1) + oprot.writeMapBegin(TType.I64, TType.STRING, len(self.metadata)) + for kiter888, viter889 in self.metadata.items(): + oprot.writeI64(kiter888) + oprot.writeBinary(viter889) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.isSupported is not None: + oprot.writeFieldBegin("isSupported", TType.BOOL, 2) + oprot.writeBool(self.isSupported) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.metadata is None: + raise TProtocolException(message="Required field metadata is unset!") + if self.isSupported is None: + raise TProtocolException(message="Required field isSupported is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetFileMetadataRequest: + """ + Attributes: + - fileIds + + """ + + def __init__( + self, + fileIds=None, + ): + self.fileIds = fileIds + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fileIds = [] + (_etype893, _size890) = iprot.readListBegin() + for _i894 in range(_size890): + _elem895 = iprot.readI64() + self.fileIds.append(_elem895) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetFileMetadataRequest") + if self.fileIds is not None: + oprot.writeFieldBegin("fileIds", TType.LIST, 1) + oprot.writeListBegin(TType.I64, len(self.fileIds)) + for iter896 in self.fileIds: + oprot.writeI64(iter896) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fileIds is None: + raise TProtocolException(message="Required field fileIds is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + 
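+# The structs in this module share one generated pattern: read()/write()
+# dispatch on the field ids in thrift_spec, and validate() checks required
+# fields only. A minimal serialization sketch, assuming the thrift runtime's
+# TMemoryBuffer and TBinaryProtocol (not defined in this file):
+#
+#     from thrift.protocol import TBinaryProtocol
+#     from thrift.transport import TTransport
+#
+#     buf = TTransport.TMemoryBuffer()
+#     proto = TBinaryProtocol.TBinaryProtocol(buf)
+#     req = GetFileMetadataRequest(fileIds=[1, 2, 3])
+#     req.validate()    # raises TProtocolException if fileIds is unset
+#     req.write(proto)  # binary-encodes the struct into the in-memory buffer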
+class PutFileMetadataResult: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PutFileMetadataResult") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PutFileMetadataRequest: + """ + Attributes: + - fileIds + - metadata + - type + + """ + + def __init__( + self, + fileIds=None, + metadata=None, + type=None, + ): + self.fileIds = fileIds + self.metadata = metadata + self.type = type + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fileIds = [] + (_etype900, _size897) = iprot.readListBegin() + for _i901 in range(_size897): + _elem902 = iprot.readI64() + self.fileIds.append(_elem902) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.metadata = [] + (_etype906, _size903) = iprot.readListBegin() + for _i907 in range(_size903): + _elem908 = iprot.readBinary() + self.metadata.append(_elem908) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.type = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PutFileMetadataRequest") + if self.fileIds is not None: + oprot.writeFieldBegin("fileIds", TType.LIST, 1) + oprot.writeListBegin(TType.I64, len(self.fileIds)) + for iter909 in self.fileIds: + oprot.writeI64(iter909) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.metadata is not None: + oprot.writeFieldBegin("metadata", TType.LIST, 2) + oprot.writeListBegin(TType.STRING, len(self.metadata)) + for iter910 in self.metadata: + oprot.writeBinary(iter910) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.I32, 3) + oprot.writeI32(self.type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fileIds is None: + raise TProtocolException(message="Required field fileIds is unset!") + if self.metadata is None: + raise TProtocolException(message="Required field metadata is unset!") + 
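        # The optional `type` field is deliberately not checked here:
+        # generated validate() methods guard required fields only, so optional
+        # fields are never validated.
+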
return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ClearFileMetadataResult: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ClearFileMetadataResult") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ClearFileMetadataRequest: + """ + Attributes: + - fileIds + + """ + + def __init__( + self, + fileIds=None, + ): + self.fileIds = fileIds + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fileIds = [] + (_etype914, _size911) = iprot.readListBegin() + for _i915 in range(_size911): + _elem916 = iprot.readI64() + self.fileIds.append(_elem916) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ClearFileMetadataRequest") + if self.fileIds is not None: + oprot.writeFieldBegin("fileIds", TType.LIST, 1) + oprot.writeListBegin(TType.I64, len(self.fileIds)) + for iter917 in self.fileIds: + oprot.writeI64(iter917) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fileIds is None: + raise TProtocolException(message="Required field fileIds is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CacheFileMetadataResult: + """ + Attributes: + - isSupported + + """ + + def __init__( + self, + isSupported=None, + ): + self.isSupported = isSupported + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and 
isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.BOOL: + self.isSupported = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CacheFileMetadataResult") + if self.isSupported is not None: + oprot.writeFieldBegin("isSupported", TType.BOOL, 1) + oprot.writeBool(self.isSupported) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.isSupported is None: + raise TProtocolException(message="Required field isSupported is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CacheFileMetadataRequest: + """ + Attributes: + - dbName + - tblName + - partName + - isAllParts + + """ + + def __init__( + self, + dbName=None, + tblName=None, + partName=None, + isAllParts=None, + ): + self.dbName = dbName + self.tblName = tblName + self.partName = partName + self.isAllParts = isAllParts + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.partName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.isAllParts = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CacheFileMetadataRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 2) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.partName is not None: + 
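            # Optional fields are written only when set; an unset (None)
+            # partName simply never appears on the wire.
+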
oprot.writeFieldBegin("partName", TType.STRING, 3) + oprot.writeString(self.partName.encode("utf-8") if sys.version_info[0] == 2 else self.partName) + oprot.writeFieldEnd() + if self.isAllParts is not None: + oprot.writeFieldBegin("isAllParts", TType.BOOL, 4) + oprot.writeBool(self.isAllParts) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetAllFunctionsResponse: + """ + Attributes: + - functions + + """ + + def __init__( + self, + functions=None, + ): + self.functions = functions + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.functions = [] + (_etype921, _size918) = iprot.readListBegin() + for _i922 in range(_size918): + _elem923 = Function() + _elem923.read(iprot) + self.functions.append(_elem923) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetAllFunctionsResponse") + if self.functions is not None: + oprot.writeFieldBegin("functions", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.functions)) + for iter924 in self.functions: + iter924.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ClientCapabilities: + """ + Attributes: + - values + + """ + + def __init__( + self, + values=None, + ): + self.values = values + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.values = [] + (_etype928, _size925) = iprot.readListBegin() + for _i929 in range(_size925): + _elem930 = iprot.readI32() + self.values.append(_elem930) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, 
oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ClientCapabilities") + if self.values is not None: + oprot.writeFieldBegin("values", TType.LIST, 1) + oprot.writeListBegin(TType.I32, len(self.values)) + for iter931 in self.values: + oprot.writeI32(iter931) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.values is None: + raise TProtocolException(message="Required field values is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetProjectionsSpec: + """ + Attributes: + - fieldList + - includeParamKeyPattern + - excludeParamKeyPattern + + """ + + def __init__( + self, + fieldList=None, + includeParamKeyPattern=None, + excludeParamKeyPattern=None, + ): + self.fieldList = fieldList + self.includeParamKeyPattern = includeParamKeyPattern + self.excludeParamKeyPattern = excludeParamKeyPattern + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fieldList = [] + (_etype935, _size932) = iprot.readListBegin() + for _i936 in range(_size932): + _elem937 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.fieldList.append(_elem937) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.includeParamKeyPattern = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.excludeParamKeyPattern = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetProjectionsSpec") + if self.fieldList is not None: + oprot.writeFieldBegin("fieldList", TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.fieldList)) + for iter938 in self.fieldList: + oprot.writeString(iter938.encode("utf-8") if sys.version_info[0] == 2 else iter938) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.includeParamKeyPattern is not None: + oprot.writeFieldBegin("includeParamKeyPattern", TType.STRING, 2) + oprot.writeString( + self.includeParamKeyPattern.encode("utf-8") if sys.version_info[0] == 2 else self.includeParamKeyPattern + ) + oprot.writeFieldEnd() + if self.excludeParamKeyPattern is not None: + oprot.writeFieldBegin("excludeParamKeyPattern", TType.STRING, 3) + oprot.writeString( + 
self.excludeParamKeyPattern.encode("utf-8") if sys.version_info[0] == 2 else self.excludeParamKeyPattern + ) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetTableRequest: + """ + Attributes: + - dbName + - tblName + - capabilities + - catName + - validWriteIdList + - getColumnStats + - processorCapabilities + - processorIdentifier + - engine + - id + + """ + + def __init__( + self, + dbName=None, + tblName=None, + capabilities=None, + catName=None, + validWriteIdList=None, + getColumnStats=None, + processorCapabilities=None, + processorIdentifier=None, + engine=None, + id=-1, + ): + self.dbName = dbName + self.tblName = tblName + self.capabilities = capabilities + self.catName = catName + self.validWriteIdList = validWriteIdList + self.getColumnStats = getColumnStats + self.processorCapabilities = processorCapabilities + self.processorIdentifier = processorIdentifier + self.engine = engine + self.id = id + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.capabilities = ClientCapabilities() + self.capabilities.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.getColumnStats = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.LIST: + self.processorCapabilities = [] + (_etype942, _size939) = iprot.readListBegin() + for _i943 in range(_size939): + _elem944 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.processorCapabilities.append(_elem944) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.processorIdentifier = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.engine = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + 
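                    # A wire-type mismatch on a known field id is skipped
+                    # rather than raising, which keeps readers compatible
+                    # across schema changes; unknown field ids are skipped in
+                    # the final else branch below.
+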
iprot.skip(ftype) + elif fid == 11: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetTableRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 2) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.capabilities is not None: + oprot.writeFieldBegin("capabilities", TType.STRUCT, 3) + self.capabilities.write(oprot) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 4) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 6) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.getColumnStats is not None: + oprot.writeFieldBegin("getColumnStats", TType.BOOL, 7) + oprot.writeBool(self.getColumnStats) + oprot.writeFieldEnd() + if self.processorCapabilities is not None: + oprot.writeFieldBegin("processorCapabilities", TType.LIST, 8) + oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) + for iter945 in self.processorCapabilities: + oprot.writeString(iter945.encode("utf-8") if sys.version_info[0] == 2 else iter945) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.processorIdentifier is not None: + oprot.writeFieldBegin("processorIdentifier", TType.STRING, 9) + oprot.writeString(self.processorIdentifier.encode("utf-8") if sys.version_info[0] == 2 else self.processorIdentifier) + oprot.writeFieldEnd() + if self.engine is not None: + oprot.writeFieldBegin("engine", TType.STRING, 10) + oprot.writeString(self.engine.encode("utf-8") if sys.version_info[0] == 2 else self.engine) + oprot.writeFieldEnd() + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 11) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetTableResult: + """ + Attributes: + - table + - isStatsCompliant + + """ + + def __init__( + self, + table=None, + isStatsCompliant=None, + ): + self.table = table + self.isStatsCompliant = isStatsCompliant + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, 
self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.table = Table() + self.table.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isStatsCompliant = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetTableResult") + if self.table is not None: + oprot.writeFieldBegin("table", TType.STRUCT, 1) + self.table.write(oprot) + oprot.writeFieldEnd() + if self.isStatsCompliant is not None: + oprot.writeFieldBegin("isStatsCompliant", TType.BOOL, 2) + oprot.writeBool(self.isStatsCompliant) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.table is None: + raise TProtocolException(message="Required field table is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetTablesRequest: + """ + Attributes: + - dbName + - tblNames + - capabilities + - catName + - processorCapabilities + - processorIdentifier + - projectionSpec + - tablesPattern + + """ + + def __init__( + self, + dbName=None, + tblNames=None, + capabilities=None, + catName=None, + processorCapabilities=None, + processorIdentifier=None, + projectionSpec=None, + tablesPattern=None, + ): + self.dbName = dbName + self.tblNames = tblNames + self.capabilities = capabilities + self.catName = catName + self.processorCapabilities = processorCapabilities + self.processorIdentifier = processorIdentifier + self.projectionSpec = projectionSpec + self.tablesPattern = tablesPattern + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.tblNames = [] + (_etype949, _size946) = iprot.readListBegin() + for _i950 in range(_size946): + _elem951 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.tblNames.append(_elem951) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.capabilities = ClientCapabilities() + self.capabilities.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.processorCapabilities = [] + (_etype955, _size952) 
= iprot.readListBegin() + for _i956 in range(_size952): + _elem957 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.processorCapabilities.append(_elem957) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.processorIdentifier = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRUCT: + self.projectionSpec = GetProjectionsSpec() + self.projectionSpec.read(iprot) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.tablesPattern = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetTablesRequest") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblNames is not None: + oprot.writeFieldBegin("tblNames", TType.LIST, 2) + oprot.writeListBegin(TType.STRING, len(self.tblNames)) + for iter958 in self.tblNames: + oprot.writeString(iter958.encode("utf-8") if sys.version_info[0] == 2 else iter958) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.capabilities is not None: + oprot.writeFieldBegin("capabilities", TType.STRUCT, 3) + self.capabilities.write(oprot) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 4) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.processorCapabilities is not None: + oprot.writeFieldBegin("processorCapabilities", TType.LIST, 5) + oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) + for iter959 in self.processorCapabilities: + oprot.writeString(iter959.encode("utf-8") if sys.version_info[0] == 2 else iter959) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.processorIdentifier is not None: + oprot.writeFieldBegin("processorIdentifier", TType.STRING, 6) + oprot.writeString(self.processorIdentifier.encode("utf-8") if sys.version_info[0] == 2 else self.processorIdentifier) + oprot.writeFieldEnd() + if self.projectionSpec is not None: + oprot.writeFieldBegin("projectionSpec", TType.STRUCT, 7) + self.projectionSpec.write(oprot) + oprot.writeFieldEnd() + if self.tablesPattern is not None: + oprot.writeFieldBegin("tablesPattern", TType.STRING, 8) + oprot.writeString(self.tablesPattern.encode("utf-8") if sys.version_info[0] == 2 else self.tablesPattern) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetTablesResult: + 
""" + Attributes: + - tables + + """ + + def __init__( + self, + tables=None, + ): + self.tables = tables + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.tables = [] + (_etype963, _size960) = iprot.readListBegin() + for _i964 in range(_size960): + _elem965 = Table() + _elem965.read(iprot) + self.tables.append(_elem965) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetTablesResult") + if self.tables is not None: + oprot.writeFieldBegin("tables", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.tables)) + for iter966 in self.tables: + iter966.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.tables is None: + raise TProtocolException(message="Required field tables is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetTablesExtRequest: + """ + Attributes: + - catalog + - database + - tableNamePattern + - requestedFields + - limit + - processorCapabilities + - processorIdentifier + + """ + + def __init__( + self, + catalog=None, + database=None, + tableNamePattern=None, + requestedFields=None, + limit=None, + processorCapabilities=None, + processorIdentifier=None, + ): + self.catalog = catalog + self.database = database + self.tableNamePattern = tableNamePattern + self.requestedFields = requestedFields + self.limit = limit + self.processorCapabilities = processorCapabilities + self.processorIdentifier = processorIdentifier + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catalog = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.database = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableNamePattern = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.requestedFields = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == 
TType.I32: + self.limit = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.processorCapabilities = [] + (_etype970, _size967) = iprot.readListBegin() + for _i971 in range(_size967): + _elem972 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.processorCapabilities.append(_elem972) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.processorIdentifier = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetTablesExtRequest") + if self.catalog is not None: + oprot.writeFieldBegin("catalog", TType.STRING, 1) + oprot.writeString(self.catalog.encode("utf-8") if sys.version_info[0] == 2 else self.catalog) + oprot.writeFieldEnd() + if self.database is not None: + oprot.writeFieldBegin("database", TType.STRING, 2) + oprot.writeString(self.database.encode("utf-8") if sys.version_info[0] == 2 else self.database) + oprot.writeFieldEnd() + if self.tableNamePattern is not None: + oprot.writeFieldBegin("tableNamePattern", TType.STRING, 3) + oprot.writeString(self.tableNamePattern.encode("utf-8") if sys.version_info[0] == 2 else self.tableNamePattern) + oprot.writeFieldEnd() + if self.requestedFields is not None: + oprot.writeFieldBegin("requestedFields", TType.I32, 4) + oprot.writeI32(self.requestedFields) + oprot.writeFieldEnd() + if self.limit is not None: + oprot.writeFieldBegin("limit", TType.I32, 5) + oprot.writeI32(self.limit) + oprot.writeFieldEnd() + if self.processorCapabilities is not None: + oprot.writeFieldBegin("processorCapabilities", TType.LIST, 6) + oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) + for iter973 in self.processorCapabilities: + oprot.writeString(iter973.encode("utf-8") if sys.version_info[0] == 2 else iter973) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.processorIdentifier is not None: + oprot.writeFieldBegin("processorIdentifier", TType.STRING, 7) + oprot.writeString(self.processorIdentifier.encode("utf-8") if sys.version_info[0] == 2 else self.processorIdentifier) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.catalog is None: + raise TProtocolException(message="Required field catalog is unset!") + if self.database is None: + raise TProtocolException(message="Required field database is unset!") + if self.tableNamePattern is None: + raise TProtocolException(message="Required field tableNamePattern is unset!") + if self.requestedFields is None: + raise TProtocolException(message="Required field requestedFields is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ExtendedTableInfo: + """ + Attributes: + - tblName + - accessType + - requiredReadCapabilities + - requiredWriteCapabilities + + """ + + def __init__( + self, + tblName=None, + 
accessType=None, + requiredReadCapabilities=None, + requiredWriteCapabilities=None, + ): + self.tblName = tblName + self.accessType = accessType + self.requiredReadCapabilities = requiredReadCapabilities + self.requiredWriteCapabilities = requiredWriteCapabilities + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.accessType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.requiredReadCapabilities = [] + (_etype977, _size974) = iprot.readListBegin() + for _i978 in range(_size974): + _elem979 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.requiredReadCapabilities.append(_elem979) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.requiredWriteCapabilities = [] + (_etype983, _size980) = iprot.readListBegin() + for _i984 in range(_size980): + _elem985 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.requiredWriteCapabilities.append(_elem985) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ExtendedTableInfo") + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 1) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.accessType is not None: + oprot.writeFieldBegin("accessType", TType.I32, 2) + oprot.writeI32(self.accessType) + oprot.writeFieldEnd() + if self.requiredReadCapabilities is not None: + oprot.writeFieldBegin("requiredReadCapabilities", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.requiredReadCapabilities)) + for iter986 in self.requiredReadCapabilities: + oprot.writeString(iter986.encode("utf-8") if sys.version_info[0] == 2 else iter986) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.requiredWriteCapabilities is not None: + oprot.writeFieldBegin("requiredWriteCapabilities", TType.LIST, 4) + oprot.writeListBegin(TType.STRING, len(self.requiredWriteCapabilities)) + for iter987 in self.requiredWriteCapabilities: + oprot.writeString(iter987.encode("utf-8") if sys.version_info[0] == 2 else iter987) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ 
== other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetDatabaseRequest: + """ + Attributes: + - name + - catalogName + - processorCapabilities + - processorIdentifier + + """ + + def __init__( + self, + name=None, + catalogName=None, + processorCapabilities=None, + processorIdentifier=None, + ): + self.name = name + self.catalogName = catalogName + self.processorCapabilities = processorCapabilities + self.processorIdentifier = processorIdentifier + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.catalogName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.processorCapabilities = [] + (_etype991, _size988) = iprot.readListBegin() + for _i992 in range(_size988): + _elem993 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.processorCapabilities.append(_elem993) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.processorIdentifier = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetDatabaseRequest") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.catalogName is not None: + oprot.writeFieldBegin("catalogName", TType.STRING, 2) + oprot.writeString(self.catalogName.encode("utf-8") if sys.version_info[0] == 2 else self.catalogName) + oprot.writeFieldEnd() + if self.processorCapabilities is not None: + oprot.writeFieldBegin("processorCapabilities", TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) + for iter994 in self.processorCapabilities: + oprot.writeString(iter994.encode("utf-8") if sys.version_info[0] == 2 else iter994) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.processorIdentifier is not None: + oprot.writeFieldBegin("processorIdentifier", TType.STRING, 4) + oprot.writeString(self.processorIdentifier.encode("utf-8") if sys.version_info[0] == 2 else self.processorIdentifier) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def 
__ne__(self, other): + return not (self == other) + + +class DropDatabaseRequest: + """ + Attributes: + - name + - catalogName + - ignoreUnknownDb + - deleteData + - cascade + - softDelete + - txnId + - deleteManagedDir + + """ + + def __init__( + self, + name=None, + catalogName=None, + ignoreUnknownDb=None, + deleteData=None, + cascade=None, + softDelete=False, + txnId=0, + deleteManagedDir=True, + ): + self.name = name + self.catalogName = catalogName + self.ignoreUnknownDb = ignoreUnknownDb + self.deleteData = deleteData + self.cascade = cascade + self.softDelete = softDelete + self.txnId = txnId + self.deleteManagedDir = deleteManagedDir + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.catalogName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.ignoreUnknownDb = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.BOOL: + self.cascade = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.BOOL: + self.softDelete = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.deleteManagedDir = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("DropDatabaseRequest") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.catalogName is not None: + oprot.writeFieldBegin("catalogName", TType.STRING, 2) + oprot.writeString(self.catalogName.encode("utf-8") if sys.version_info[0] == 2 else self.catalogName) + oprot.writeFieldEnd() + if self.ignoreUnknownDb is not None: + oprot.writeFieldBegin("ignoreUnknownDb", TType.BOOL, 3) + oprot.writeBool(self.ignoreUnknownDb) + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin("deleteData", TType.BOOL, 4) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + if self.cascade is not None: + oprot.writeFieldBegin("cascade", TType.BOOL, 5) + oprot.writeBool(self.cascade) + oprot.writeFieldEnd() + if self.softDelete is not None: + oprot.writeFieldBegin("softDelete", TType.BOOL, 6) + oprot.writeBool(self.softDelete) + oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin("txnId", TType.I64, 7) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.deleteManagedDir is not None: 
+ oprot.writeFieldBegin("deleteManagedDir", TType.BOOL, 8) + oprot.writeBool(self.deleteManagedDir) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.name is None: + raise TProtocolException(message="Required field name is unset!") + if self.ignoreUnknownDb is None: + raise TProtocolException(message="Required field ignoreUnknownDb is unset!") + if self.deleteData is None: + raise TProtocolException(message="Required field deleteData is unset!") + if self.cascade is None: + raise TProtocolException(message="Required field cascade is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CmRecycleRequest: + """ + Attributes: + - dataPath + - purge + + """ + + def __init__( + self, + dataPath=None, + purge=None, + ): + self.dataPath = dataPath + self.purge = purge + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dataPath = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.purge = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CmRecycleRequest") + if self.dataPath is not None: + oprot.writeFieldBegin("dataPath", TType.STRING, 1) + oprot.writeString(self.dataPath.encode("utf-8") if sys.version_info[0] == 2 else self.dataPath) + oprot.writeFieldEnd() + if self.purge is not None: + oprot.writeFieldBegin("purge", TType.BOOL, 2) + oprot.writeBool(self.purge) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dataPath is None: + raise TProtocolException(message="Required field dataPath is unset!") + if self.purge is None: + raise TProtocolException(message="Required field purge is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CmRecycleResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, 
oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CmRecycleResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class TableMeta: + """ + Attributes: + - dbName + - tableName + - tableType + - comments + - catName + + """ + + def __init__( + self, + dbName=None, + tableName=None, + tableType=None, + comments=None, + catName=None, + ): + self.dbName = dbName + self.tableName = tableName + self.tableType = tableType + self.comments = comments + self.catName = catName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableType = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.comments = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("TableMeta") + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 1) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 2) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.tableType is not None: + oprot.writeFieldBegin("tableType", TType.STRING, 3) + oprot.writeString(self.tableType.encode("utf-8") if sys.version_info[0] == 2 else self.tableType) + oprot.writeFieldEnd() + if self.comments is not None: + oprot.writeFieldBegin("comments", TType.STRING, 4) + oprot.writeString(self.comments.encode("utf-8") if sys.version_info[0] == 2 else self.comments) + oprot.writeFieldEnd() + if self.catName is not None: + 
oprot.writeFieldBegin("catName", TType.STRING, 5) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tableName is None: + raise TProtocolException(message="Required field tableName is unset!") + if self.tableType is None: + raise TProtocolException(message="Required field tableType is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Materialization: + """ + Attributes: + - sourceTablesUpdateDeleteModified + - sourceTablesCompacted + + """ + + def __init__( + self, + sourceTablesUpdateDeleteModified=None, + sourceTablesCompacted=None, + ): + self.sourceTablesUpdateDeleteModified = sourceTablesUpdateDeleteModified + self.sourceTablesCompacted = sourceTablesCompacted + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.BOOL: + self.sourceTablesUpdateDeleteModified = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.sourceTablesCompacted = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Materialization") + if self.sourceTablesUpdateDeleteModified is not None: + oprot.writeFieldBegin("sourceTablesUpdateDeleteModified", TType.BOOL, 1) + oprot.writeBool(self.sourceTablesUpdateDeleteModified) + oprot.writeFieldEnd() + if self.sourceTablesCompacted is not None: + oprot.writeFieldBegin("sourceTablesCompacted", TType.BOOL, 2) + oprot.writeBool(self.sourceTablesCompacted) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.sourceTablesUpdateDeleteModified is None: + raise TProtocolException(message="Required field sourceTablesUpdateDeleteModified is unset!") + if self.sourceTablesCompacted is None: + raise TProtocolException(message="Required field sourceTablesCompacted is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMResourcePlan: + """ + Attributes: + - name + - status + - queryParallelism + - defaultPoolPath + - ns + + """ + + def __init__( + self, + name=None, + status=None, + queryParallelism=None, + defaultPoolPath=None, + ns=None, + ): + self.name = name + self.status = status + self.queryParallelism = 
queryParallelism + self.defaultPoolPath = defaultPoolPath + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.status = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.queryParallelism = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.defaultPoolPath = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMResourcePlan") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.status is not None: + oprot.writeFieldBegin("status", TType.I32, 2) + oprot.writeI32(self.status) + oprot.writeFieldEnd() + if self.queryParallelism is not None: + oprot.writeFieldBegin("queryParallelism", TType.I32, 3) + oprot.writeI32(self.queryParallelism) + oprot.writeFieldEnd() + if self.defaultPoolPath is not None: + oprot.writeFieldBegin("defaultPoolPath", TType.STRING, 4) + oprot.writeString(self.defaultPoolPath.encode("utf-8") if sys.version_info[0] == 2 else self.defaultPoolPath) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 5) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.name is None: + raise TProtocolException(message="Required field name is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMNullableResourcePlan: + """ + Attributes: + - name + - status + - queryParallelism + - isSetQueryParallelism + - defaultPoolPath + - isSetDefaultPoolPath + - ns + + """ + + def __init__( + self, + name=None, + status=None, + queryParallelism=None, + isSetQueryParallelism=None, + defaultPoolPath=None, + isSetDefaultPoolPath=None, + ns=None, + ): + self.name = name + self.status = status + self.queryParallelism = queryParallelism + self.isSetQueryParallelism = isSetQueryParallelism + self.defaultPoolPath = defaultPoolPath + self.isSetDefaultPoolPath = 
isSetDefaultPoolPath + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.status = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.queryParallelism = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.BOOL: + self.isSetQueryParallelism = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.defaultPoolPath = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.isSetDefaultPoolPath = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMNullableResourcePlan") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.status is not None: + oprot.writeFieldBegin("status", TType.I32, 2) + oprot.writeI32(self.status) + oprot.writeFieldEnd() + if self.queryParallelism is not None: + oprot.writeFieldBegin("queryParallelism", TType.I32, 4) + oprot.writeI32(self.queryParallelism) + oprot.writeFieldEnd() + if self.isSetQueryParallelism is not None: + oprot.writeFieldBegin("isSetQueryParallelism", TType.BOOL, 5) + oprot.writeBool(self.isSetQueryParallelism) + oprot.writeFieldEnd() + if self.defaultPoolPath is not None: + oprot.writeFieldBegin("defaultPoolPath", TType.STRING, 6) + oprot.writeString(self.defaultPoolPath.encode("utf-8") if sys.version_info[0] == 2 else self.defaultPoolPath) + oprot.writeFieldEnd() + if self.isSetDefaultPoolPath is not None: + oprot.writeFieldBegin("isSetDefaultPoolPath", TType.BOOL, 7) + oprot.writeBool(self.isSetDefaultPoolPath) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 8) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMPool: + """ + Attributes: + - resourcePlanName + - poolPath + - allocFraction + - 
queryParallelism + - schedulingPolicy + - ns + + """ + + def __init__( + self, + resourcePlanName=None, + poolPath=None, + allocFraction=None, + queryParallelism=None, + schedulingPolicy=None, + ns=None, + ): + self.resourcePlanName = resourcePlanName + self.poolPath = poolPath + self.allocFraction = allocFraction + self.queryParallelism = queryParallelism + self.schedulingPolicy = schedulingPolicy + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.poolPath = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.DOUBLE: + self.allocFraction = iprot.readDouble() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.queryParallelism = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.schedulingPolicy = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMPool") + if self.resourcePlanName is not None: + oprot.writeFieldBegin("resourcePlanName", TType.STRING, 1) + oprot.writeString(self.resourcePlanName.encode("utf-8") if sys.version_info[0] == 2 else self.resourcePlanName) + oprot.writeFieldEnd() + if self.poolPath is not None: + oprot.writeFieldBegin("poolPath", TType.STRING, 2) + oprot.writeString(self.poolPath.encode("utf-8") if sys.version_info[0] == 2 else self.poolPath) + oprot.writeFieldEnd() + if self.allocFraction is not None: + oprot.writeFieldBegin("allocFraction", TType.DOUBLE, 3) + oprot.writeDouble(self.allocFraction) + oprot.writeFieldEnd() + if self.queryParallelism is not None: + oprot.writeFieldBegin("queryParallelism", TType.I32, 4) + oprot.writeI32(self.queryParallelism) + oprot.writeFieldEnd() + if self.schedulingPolicy is not None: + oprot.writeFieldBegin("schedulingPolicy", TType.STRING, 5) + oprot.writeString(self.schedulingPolicy.encode("utf-8") if sys.version_info[0] == 2 else self.schedulingPolicy) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 6) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.resourcePlanName is None: + raise TProtocolException(message="Required field resourcePlanName is unset!") + if self.poolPath is None: + raise 
TProtocolException(message="Required field poolPath is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMNullablePool: + """ + Attributes: + - resourcePlanName + - poolPath + - allocFraction + - queryParallelism + - schedulingPolicy + - isSetSchedulingPolicy + - ns + + """ + + def __init__( + self, + resourcePlanName=None, + poolPath=None, + allocFraction=None, + queryParallelism=None, + schedulingPolicy=None, + isSetSchedulingPolicy=None, + ns=None, + ): + self.resourcePlanName = resourcePlanName + self.poolPath = poolPath + self.allocFraction = allocFraction + self.queryParallelism = queryParallelism + self.schedulingPolicy = schedulingPolicy + self.isSetSchedulingPolicy = isSetSchedulingPolicy + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.poolPath = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.DOUBLE: + self.allocFraction = iprot.readDouble() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.queryParallelism = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.schedulingPolicy = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.BOOL: + self.isSetSchedulingPolicy = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMNullablePool") + if self.resourcePlanName is not None: + oprot.writeFieldBegin("resourcePlanName", TType.STRING, 1) + oprot.writeString(self.resourcePlanName.encode("utf-8") if sys.version_info[0] == 2 else self.resourcePlanName) + oprot.writeFieldEnd() + if self.poolPath is not None: + oprot.writeFieldBegin("poolPath", TType.STRING, 2) + oprot.writeString(self.poolPath.encode("utf-8") if sys.version_info[0] == 2 else self.poolPath) + oprot.writeFieldEnd() + if self.allocFraction is not None: + oprot.writeFieldBegin("allocFraction", TType.DOUBLE, 3) + oprot.writeDouble(self.allocFraction) + oprot.writeFieldEnd() + if self.queryParallelism is not None: + 
oprot.writeFieldBegin("queryParallelism", TType.I32, 4) + oprot.writeI32(self.queryParallelism) + oprot.writeFieldEnd() + if self.schedulingPolicy is not None: + oprot.writeFieldBegin("schedulingPolicy", TType.STRING, 5) + oprot.writeString(self.schedulingPolicy.encode("utf-8") if sys.version_info[0] == 2 else self.schedulingPolicy) + oprot.writeFieldEnd() + if self.isSetSchedulingPolicy is not None: + oprot.writeFieldBegin("isSetSchedulingPolicy", TType.BOOL, 6) + oprot.writeBool(self.isSetSchedulingPolicy) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 7) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.resourcePlanName is None: + raise TProtocolException(message="Required field resourcePlanName is unset!") + if self.poolPath is None: + raise TProtocolException(message="Required field poolPath is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMTrigger: + """ + Attributes: + - resourcePlanName + - triggerName + - triggerExpression + - actionExpression + - isInUnmanaged + - ns + + """ + + def __init__( + self, + resourcePlanName=None, + triggerName=None, + triggerExpression=None, + actionExpression=None, + isInUnmanaged=None, + ns=None, + ): + self.resourcePlanName = resourcePlanName + self.triggerName = triggerName + self.triggerExpression = triggerExpression + self.actionExpression = actionExpression + self.isInUnmanaged = isInUnmanaged + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.triggerName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.triggerExpression = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.actionExpression = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.BOOL: + self.isInUnmanaged = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and 
self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMTrigger") + if self.resourcePlanName is not None: + oprot.writeFieldBegin("resourcePlanName", TType.STRING, 1) + oprot.writeString(self.resourcePlanName.encode("utf-8") if sys.version_info[0] == 2 else self.resourcePlanName) + oprot.writeFieldEnd() + if self.triggerName is not None: + oprot.writeFieldBegin("triggerName", TType.STRING, 2) + oprot.writeString(self.triggerName.encode("utf-8") if sys.version_info[0] == 2 else self.triggerName) + oprot.writeFieldEnd() + if self.triggerExpression is not None: + oprot.writeFieldBegin("triggerExpression", TType.STRING, 3) + oprot.writeString(self.triggerExpression.encode("utf-8") if sys.version_info[0] == 2 else self.triggerExpression) + oprot.writeFieldEnd() + if self.actionExpression is not None: + oprot.writeFieldBegin("actionExpression", TType.STRING, 4) + oprot.writeString(self.actionExpression.encode("utf-8") if sys.version_info[0] == 2 else self.actionExpression) + oprot.writeFieldEnd() + if self.isInUnmanaged is not None: + oprot.writeFieldBegin("isInUnmanaged", TType.BOOL, 5) + oprot.writeBool(self.isInUnmanaged) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 6) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.resourcePlanName is None: + raise TProtocolException(message="Required field resourcePlanName is unset!") + if self.triggerName is None: + raise TProtocolException(message="Required field triggerName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMMapping: + """ + Attributes: + - resourcePlanName + - entityType + - entityName + - poolPath + - ordering + - ns + + """ + + def __init__( + self, + resourcePlanName=None, + entityType=None, + entityName=None, + poolPath=None, + ordering=None, + ns=None, + ): + self.resourcePlanName = resourcePlanName + self.entityType = entityType + self.entityName = entityName + self.poolPath = poolPath + self.ordering = ordering + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.entityType = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.entityName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: 
+ self.poolPath = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.ordering = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMMapping") + if self.resourcePlanName is not None: + oprot.writeFieldBegin("resourcePlanName", TType.STRING, 1) + oprot.writeString(self.resourcePlanName.encode("utf-8") if sys.version_info[0] == 2 else self.resourcePlanName) + oprot.writeFieldEnd() + if self.entityType is not None: + oprot.writeFieldBegin("entityType", TType.STRING, 2) + oprot.writeString(self.entityType.encode("utf-8") if sys.version_info[0] == 2 else self.entityType) + oprot.writeFieldEnd() + if self.entityName is not None: + oprot.writeFieldBegin("entityName", TType.STRING, 3) + oprot.writeString(self.entityName.encode("utf-8") if sys.version_info[0] == 2 else self.entityName) + oprot.writeFieldEnd() + if self.poolPath is not None: + oprot.writeFieldBegin("poolPath", TType.STRING, 4) + oprot.writeString(self.poolPath.encode("utf-8") if sys.version_info[0] == 2 else self.poolPath) + oprot.writeFieldEnd() + if self.ordering is not None: + oprot.writeFieldBegin("ordering", TType.I32, 5) + oprot.writeI32(self.ordering) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 6) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.resourcePlanName is None: + raise TProtocolException(message="Required field resourcePlanName is unset!") + if self.entityType is None: + raise TProtocolException(message="Required field entityType is unset!") + if self.entityName is None: + raise TProtocolException(message="Required field entityName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMPoolTrigger: + """ + Attributes: + - pool + - trigger + - ns + + """ + + def __init__( + self, + pool=None, + trigger=None, + ns=None, + ): + self.pool = pool + self.trigger = trigger + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.pool = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.trigger = ( + 
iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMPoolTrigger") + if self.pool is not None: + oprot.writeFieldBegin("pool", TType.STRING, 1) + oprot.writeString(self.pool.encode("utf-8") if sys.version_info[0] == 2 else self.pool) + oprot.writeFieldEnd() + if self.trigger is not None: + oprot.writeFieldBegin("trigger", TType.STRING, 2) + oprot.writeString(self.trigger.encode("utf-8") if sys.version_info[0] == 2 else self.trigger) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 3) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.pool is None: + raise TProtocolException(message="Required field pool is unset!") + if self.trigger is None: + raise TProtocolException(message="Required field trigger is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMFullResourcePlan: + """ + Attributes: + - plan + - pools + - mappings + - triggers + - poolTriggers + + """ + + def __init__( + self, + plan=None, + pools=None, + mappings=None, + triggers=None, + poolTriggers=None, + ): + self.plan = plan + self.pools = pools + self.mappings = mappings + self.triggers = triggers + self.poolTriggers = poolTriggers + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.plan = WMResourcePlan() + self.plan.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.pools = [] + (_etype998, _size995) = iprot.readListBegin() + for _i999 in range(_size995): + _elem1000 = WMPool() + _elem1000.read(iprot) + self.pools.append(_elem1000) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.mappings = [] + (_etype1004, _size1001) = iprot.readListBegin() + for _i1005 in range(_size1001): + _elem1006 = WMMapping() + _elem1006.read(iprot) + self.mappings.append(_elem1006) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.triggers = [] + (_etype1010, _size1007) = iprot.readListBegin() + for _i1011 in range(_size1007): + _elem1012 = WMTrigger() + _elem1012.read(iprot) + self.triggers.append(_elem1012) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: 
+ self.poolTriggers = [] + (_etype1016, _size1013) = iprot.readListBegin() + for _i1017 in range(_size1013): + _elem1018 = WMPoolTrigger() + _elem1018.read(iprot) + self.poolTriggers.append(_elem1018) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMFullResourcePlan") + if self.plan is not None: + oprot.writeFieldBegin("plan", TType.STRUCT, 1) + self.plan.write(oprot) + oprot.writeFieldEnd() + if self.pools is not None: + oprot.writeFieldBegin("pools", TType.LIST, 2) + oprot.writeListBegin(TType.STRUCT, len(self.pools)) + for iter1019 in self.pools: + iter1019.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.mappings is not None: + oprot.writeFieldBegin("mappings", TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.mappings)) + for iter1020 in self.mappings: + iter1020.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.triggers is not None: + oprot.writeFieldBegin("triggers", TType.LIST, 4) + oprot.writeListBegin(TType.STRUCT, len(self.triggers)) + for iter1021 in self.triggers: + iter1021.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.poolTriggers is not None: + oprot.writeFieldBegin("poolTriggers", TType.LIST, 5) + oprot.writeListBegin(TType.STRUCT, len(self.poolTriggers)) + for iter1022 in self.poolTriggers: + iter1022.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.plan is None: + raise TProtocolException(message="Required field plan is unset!") + if self.pools is None: + raise TProtocolException(message="Required field pools is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMCreateResourcePlanRequest: + """ + Attributes: + - resourcePlan + - copyFrom + + """ + + def __init__( + self, + resourcePlan=None, + copyFrom=None, + ): + self.resourcePlan = resourcePlan + self.copyFrom = copyFrom + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.resourcePlan = WMResourcePlan() + self.resourcePlan.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.copyFrom = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMCreateResourcePlanRequest") + if self.resourcePlan 
is not None: + oprot.writeFieldBegin("resourcePlan", TType.STRUCT, 1) + self.resourcePlan.write(oprot) + oprot.writeFieldEnd() + if self.copyFrom is not None: + oprot.writeFieldBegin("copyFrom", TType.STRING, 2) + oprot.writeString(self.copyFrom.encode("utf-8") if sys.version_info[0] == 2 else self.copyFrom) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMCreateResourcePlanResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMCreateResourcePlanResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMGetActiveResourcePlanRequest: + """ + Attributes: + - ns + + """ + + def __init__( + self, + ns=None, + ): + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMGetActiveResourcePlanRequest") + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 1) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMGetActiveResourcePlanResponse: + 
""" + Attributes: + - resourcePlan + + """ + + def __init__( + self, + resourcePlan=None, + ): + self.resourcePlan = resourcePlan + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.resourcePlan = WMFullResourcePlan() + self.resourcePlan.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMGetActiveResourcePlanResponse") + if self.resourcePlan is not None: + oprot.writeFieldBegin("resourcePlan", TType.STRUCT, 1) + self.resourcePlan.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMGetResourcePlanRequest: + """ + Attributes: + - resourcePlanName + - ns + + """ + + def __init__( + self, + resourcePlanName=None, + ns=None, + ): + self.resourcePlanName = resourcePlanName + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMGetResourcePlanRequest") + if self.resourcePlanName is not None: + oprot.writeFieldBegin("resourcePlanName", TType.STRING, 1) + oprot.writeString(self.resourcePlanName.encode("utf-8") if sys.version_info[0] == 2 else self.resourcePlanName) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 2) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return 
isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMGetResourcePlanResponse: + """ + Attributes: + - resourcePlan + + """ + + def __init__( + self, + resourcePlan=None, + ): + self.resourcePlan = resourcePlan + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.resourcePlan = WMFullResourcePlan() + self.resourcePlan.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMGetResourcePlanResponse") + if self.resourcePlan is not None: + oprot.writeFieldBegin("resourcePlan", TType.STRUCT, 1) + self.resourcePlan.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMGetAllResourcePlanRequest: + """ + Attributes: + - ns + + """ + + def __init__( + self, + ns=None, + ): + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMGetAllResourcePlanRequest") + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 1) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMGetAllResourcePlanResponse: + """ + Attributes: + - resourcePlans + + """ + + def __init__( + self, + resourcePlans=None, + ): + self.resourcePlans = resourcePlans + + def read(self, iprot): + if ( + iprot._fast_decode is not None + 
and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.resourcePlans = [] + (_etype1026, _size1023) = iprot.readListBegin() + for _i1027 in range(_size1023): + _elem1028 = WMResourcePlan() + _elem1028.read(iprot) + self.resourcePlans.append(_elem1028) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMGetAllResourcePlanResponse") + if self.resourcePlans is not None: + oprot.writeFieldBegin("resourcePlans", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.resourcePlans)) + for iter1029 in self.resourcePlans: + iter1029.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMAlterResourcePlanRequest: + """ + Attributes: + - resourcePlanName + - resourcePlan + - isEnableAndActivate + - isForceDeactivate + - isReplace + - ns + + """ + + def __init__( + self, + resourcePlanName=None, + resourcePlan=None, + isEnableAndActivate=None, + isForceDeactivate=None, + isReplace=None, + ns=None, + ): + self.resourcePlanName = resourcePlanName + self.resourcePlan = resourcePlan + self.isEnableAndActivate = isEnableAndActivate + self.isForceDeactivate = isForceDeactivate + self.isReplace = isReplace + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.resourcePlan = WMNullableResourcePlan() + self.resourcePlan.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.isEnableAndActivate = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.isForceDeactivate = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.BOOL: + self.isReplace = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and 
self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMAlterResourcePlanRequest") + if self.resourcePlanName is not None: + oprot.writeFieldBegin("resourcePlanName", TType.STRING, 1) + oprot.writeString(self.resourcePlanName.encode("utf-8") if sys.version_info[0] == 2 else self.resourcePlanName) + oprot.writeFieldEnd() + if self.resourcePlan is not None: + oprot.writeFieldBegin("resourcePlan", TType.STRUCT, 2) + self.resourcePlan.write(oprot) + oprot.writeFieldEnd() + if self.isEnableAndActivate is not None: + oprot.writeFieldBegin("isEnableAndActivate", TType.BOOL, 3) + oprot.writeBool(self.isEnableAndActivate) + oprot.writeFieldEnd() + if self.isForceDeactivate is not None: + oprot.writeFieldBegin("isForceDeactivate", TType.BOOL, 4) + oprot.writeBool(self.isForceDeactivate) + oprot.writeFieldEnd() + if self.isReplace is not None: + oprot.writeFieldBegin("isReplace", TType.BOOL, 5) + oprot.writeBool(self.isReplace) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 6) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMAlterResourcePlanResponse: + """ + Attributes: + - fullResourcePlan + + """ + + def __init__( + self, + fullResourcePlan=None, + ): + self.fullResourcePlan = fullResourcePlan + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.fullResourcePlan = WMFullResourcePlan() + self.fullResourcePlan.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMAlterResourcePlanResponse") + if self.fullResourcePlan is not None: + oprot.writeFieldBegin("fullResourcePlan", TType.STRUCT, 1) + self.fullResourcePlan.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMValidateResourcePlanRequest: + """ + Attributes: + - resourcePlanName + - ns + + """ + + def __init__( + self, + resourcePlanName=None, + ns=None, + ): + self.resourcePlanName = resourcePlanName + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and 
isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMValidateResourcePlanRequest") + if self.resourcePlanName is not None: + oprot.writeFieldBegin("resourcePlanName", TType.STRING, 1) + oprot.writeString(self.resourcePlanName.encode("utf-8") if sys.version_info[0] == 2 else self.resourcePlanName) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 2) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMValidateResourcePlanResponse: + """ + Attributes: + - errors + - warnings + + """ + + def __init__( + self, + errors=None, + warnings=None, + ): + self.errors = errors + self.warnings = warnings + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.errors = [] + (_etype1033, _size1030) = iprot.readListBegin() + for _i1034 in range(_size1030): + _elem1035 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.errors.append(_elem1035) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.warnings = [] + (_etype1039, _size1036) = iprot.readListBegin() + for _i1040 in range(_size1036): + _elem1041 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.warnings.append(_elem1041) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMValidateResourcePlanResponse") + if self.errors is not None: + oprot.writeFieldBegin("errors", 
TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.errors)) + for iter1042 in self.errors: + oprot.writeString(iter1042.encode("utf-8") if sys.version_info[0] == 2 else iter1042) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.warnings is not None: + oprot.writeFieldBegin("warnings", TType.LIST, 2) + oprot.writeListBegin(TType.STRING, len(self.warnings)) + for iter1043 in self.warnings: + oprot.writeString(iter1043.encode("utf-8") if sys.version_info[0] == 2 else iter1043) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMDropResourcePlanRequest: + """ + Attributes: + - resourcePlanName + - ns + + """ + + def __init__( + self, + resourcePlanName=None, + ns=None, + ): + self.resourcePlanName = resourcePlanName + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMDropResourcePlanRequest") + if self.resourcePlanName is not None: + oprot.writeFieldBegin("resourcePlanName", TType.STRING, 1) + oprot.writeString(self.resourcePlanName.encode("utf-8") if sys.version_info[0] == 2 else self.resourcePlanName) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 2) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMDropResourcePlanResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + 
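+ # [Editorial note, not part of the Thrift-generated source: WMDropResourcePlanResponse declares no fields, so this generated reader skips whatever it encounters until TType.STOP — the standard Thrift pattern that keeps empty/older structs forward-compatible.]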
iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMDropResourcePlanResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMCreateTriggerRequest: + """ + Attributes: + - trigger + + """ + + def __init__( + self, + trigger=None, + ): + self.trigger = trigger + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.trigger = WMTrigger() + self.trigger.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMCreateTriggerRequest") + if self.trigger is not None: + oprot.writeFieldBegin("trigger", TType.STRUCT, 1) + self.trigger.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMCreateTriggerResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMCreateTriggerResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMAlterTriggerRequest: + """ + Attributes: + - trigger + + """ + + def __init__( + self, + trigger=None, + ): + self.trigger = trigger + + def read(self, iprot): + if ( + iprot._fast_decode is not None 
+ and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.trigger = WMTrigger() + self.trigger.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMAlterTriggerRequest") + if self.trigger is not None: + oprot.writeFieldBegin("trigger", TType.STRUCT, 1) + self.trigger.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMAlterTriggerResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMAlterTriggerResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMDropTriggerRequest: + """ + Attributes: + - resourcePlanName + - triggerName + - ns + + """ + + def __init__( + self, + resourcePlanName=None, + triggerName=None, + ns=None, + ): + self.resourcePlanName = resourcePlanName + self.triggerName = triggerName + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.triggerName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.ns = ( + 
iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMDropTriggerRequest") + if self.resourcePlanName is not None: + oprot.writeFieldBegin("resourcePlanName", TType.STRING, 1) + oprot.writeString(self.resourcePlanName.encode("utf-8") if sys.version_info[0] == 2 else self.resourcePlanName) + oprot.writeFieldEnd() + if self.triggerName is not None: + oprot.writeFieldBegin("triggerName", TType.STRING, 2) + oprot.writeString(self.triggerName.encode("utf-8") if sys.version_info[0] == 2 else self.triggerName) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 3) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMDropTriggerResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMDropTriggerResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMGetTriggersForResourePlanRequest: + """ + Attributes: + - resourcePlanName + - ns + + """ + + def __init__( + self, + resourcePlanName=None, + ns=None, + ): + self.resourcePlanName = resourcePlanName + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") 
if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMGetTriggersForResourePlanRequest") + if self.resourcePlanName is not None: + oprot.writeFieldBegin("resourcePlanName", TType.STRING, 1) + oprot.writeString(self.resourcePlanName.encode("utf-8") if sys.version_info[0] == 2 else self.resourcePlanName) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 2) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMGetTriggersForResourePlanResponse: + """ + Attributes: + - triggers + + """ + + def __init__( + self, + triggers=None, + ): + self.triggers = triggers + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.triggers = [] + (_etype1047, _size1044) = iprot.readListBegin() + for _i1048 in range(_size1044): + _elem1049 = WMTrigger() + _elem1049.read(iprot) + self.triggers.append(_elem1049) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMGetTriggersForResourePlanResponse") + if self.triggers is not None: + oprot.writeFieldBegin("triggers", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.triggers)) + for iter1050 in self.triggers: + iter1050.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMCreatePoolRequest: + """ + Attributes: + - pool + + """ + + def __init__( + self, + pool=None, + ): + self.pool = pool + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid 
== 1: + if ftype == TType.STRUCT: + self.pool = WMPool() + self.pool.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMCreatePoolRequest") + if self.pool is not None: + oprot.writeFieldBegin("pool", TType.STRUCT, 1) + self.pool.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMCreatePoolResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMCreatePoolResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMAlterPoolRequest: + """ + Attributes: + - pool + - poolPath + + """ + + def __init__( + self, + pool=None, + poolPath=None, + ): + self.pool = pool + self.poolPath = poolPath + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.pool = WMNullablePool() + self.pool.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.poolPath = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMAlterPoolRequest") + if self.pool is not None: + oprot.writeFieldBegin("pool", TType.STRUCT, 1) + self.pool.write(oprot) + oprot.writeFieldEnd() + if self.poolPath is not None: + oprot.writeFieldBegin("poolPath", TType.STRING, 2) + oprot.writeString(self.poolPath.encode("utf-8") if 
sys.version_info[0] == 2 else self.poolPath) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMAlterPoolResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMAlterPoolResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMDropPoolRequest: + """ + Attributes: + - resourcePlanName + - poolPath + - ns + + """ + + def __init__( + self, + resourcePlanName=None, + poolPath=None, + ns=None, + ): + self.resourcePlanName = resourcePlanName + self.poolPath = poolPath + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.poolPath = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMDropPoolRequest") + if self.resourcePlanName is not None: + oprot.writeFieldBegin("resourcePlanName", TType.STRING, 1) + oprot.writeString(self.resourcePlanName.encode("utf-8") if sys.version_info[0] == 2 else self.resourcePlanName) + oprot.writeFieldEnd() + if self.poolPath is not None: + oprot.writeFieldBegin("poolPath", TType.STRING, 2) + oprot.writeString(self.poolPath.encode("utf-8") if 
sys.version_info[0] == 2 else self.poolPath) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 3) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMDropPoolResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMDropPoolResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMCreateOrUpdateMappingRequest: + """ + Attributes: + - mapping + - update + + """ + + def __init__( + self, + mapping=None, + update=None, + ): + self.mapping = mapping + self.update = update + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.mapping = WMMapping() + self.mapping.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.update = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMCreateOrUpdateMappingRequest") + if self.mapping is not None: + oprot.writeFieldBegin("mapping", TType.STRUCT, 1) + self.mapping.write(oprot) + oprot.writeFieldEnd() + if self.update is not None: + oprot.writeFieldBegin("update", TType.BOOL, 2) + oprot.writeBool(self.update) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def 
__ne__(self, other): + return not (self == other) + + +class WMCreateOrUpdateMappingResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMCreateOrUpdateMappingResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMDropMappingRequest: + """ + Attributes: + - mapping + + """ + + def __init__( + self, + mapping=None, + ): + self.mapping = mapping + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.mapping = WMMapping() + self.mapping.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMDropMappingRequest") + if self.mapping is not None: + oprot.writeFieldBegin("mapping", TType.STRUCT, 1) + self.mapping.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMDropMappingResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMDropMappingResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, 
value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMCreateOrDropTriggerToPoolMappingRequest: + """ + Attributes: + - resourcePlanName + - triggerName + - poolPath + - drop + - ns + + """ + + def __init__( + self, + resourcePlanName=None, + triggerName=None, + poolPath=None, + drop=None, + ns=None, + ): + self.resourcePlanName = resourcePlanName + self.triggerName = triggerName + self.poolPath = poolPath + self.drop = drop + self.ns = ns + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.resourcePlanName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.triggerName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.poolPath = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.drop = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.ns = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMCreateOrDropTriggerToPoolMappingRequest") + if self.resourcePlanName is not None: + oprot.writeFieldBegin("resourcePlanName", TType.STRING, 1) + oprot.writeString(self.resourcePlanName.encode("utf-8") if sys.version_info[0] == 2 else self.resourcePlanName) + oprot.writeFieldEnd() + if self.triggerName is not None: + oprot.writeFieldBegin("triggerName", TType.STRING, 2) + oprot.writeString(self.triggerName.encode("utf-8") if sys.version_info[0] == 2 else self.triggerName) + oprot.writeFieldEnd() + if self.poolPath is not None: + oprot.writeFieldBegin("poolPath", TType.STRING, 3) + oprot.writeString(self.poolPath.encode("utf-8") if sys.version_info[0] == 2 else self.poolPath) + oprot.writeFieldEnd() + if self.drop is not None: + oprot.writeFieldBegin("drop", TType.BOOL, 4) + oprot.writeBool(self.drop) + oprot.writeFieldEnd() + if self.ns is not None: + oprot.writeFieldBegin("ns", TType.STRING, 5) + oprot.writeString(self.ns.encode("utf-8") if sys.version_info[0] == 2 else self.ns) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, 
other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class WMCreateOrDropTriggerToPoolMappingResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("WMCreateOrDropTriggerToPoolMappingResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ISchema: + """ + Attributes: + - schemaType + - name + - catName + - dbName + - compatibility + - validationLevel + - canEvolve + - schemaGroup + - description + + """ + + def __init__( + self, + schemaType=None, + name=None, + catName=None, + dbName=None, + compatibility=None, + validationLevel=None, + canEvolve=None, + schemaGroup=None, + description=None, + ): + self.schemaType = schemaType + self.name = name + self.catName = catName + self.dbName = dbName + self.compatibility = compatibility + self.validationLevel = validationLevel + self.canEvolve = canEvolve + self.schemaGroup = schemaGroup + self.description = description + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.schemaType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.compatibility = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: + self.validationLevel = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.canEvolve = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.schemaGroup = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: 
+ iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.description = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ISchema") + if self.schemaType is not None: + oprot.writeFieldBegin("schemaType", TType.I32, 1) + oprot.writeI32(self.schemaType) + oprot.writeFieldEnd() + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 2) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 3) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 4) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.compatibility is not None: + oprot.writeFieldBegin("compatibility", TType.I32, 5) + oprot.writeI32(self.compatibility) + oprot.writeFieldEnd() + if self.validationLevel is not None: + oprot.writeFieldBegin("validationLevel", TType.I32, 6) + oprot.writeI32(self.validationLevel) + oprot.writeFieldEnd() + if self.canEvolve is not None: + oprot.writeFieldBegin("canEvolve", TType.BOOL, 7) + oprot.writeBool(self.canEvolve) + oprot.writeFieldEnd() + if self.schemaGroup is not None: + oprot.writeFieldBegin("schemaGroup", TType.STRING, 8) + oprot.writeString(self.schemaGroup.encode("utf-8") if sys.version_info[0] == 2 else self.schemaGroup) + oprot.writeFieldEnd() + if self.description is not None: + oprot.writeFieldBegin("description", TType.STRING, 9) + oprot.writeString(self.description.encode("utf-8") if sys.version_info[0] == 2 else self.description) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ISchemaName: + """ + Attributes: + - catName + - dbName + - schemaName + + """ + + def __init__( + self, + catName=None, + dbName=None, + schemaName=None, + ): + self.catName = catName + self.dbName = dbName + self.schemaName = schemaName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else 
iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.schemaName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ISchemaName") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.schemaName is not None: + oprot.writeFieldBegin("schemaName", TType.STRING, 3) + oprot.writeString(self.schemaName.encode("utf-8") if sys.version_info[0] == 2 else self.schemaName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AlterISchemaRequest: + """ + Attributes: + - name + - newSchema + + """ + + def __init__( + self, + name=None, + newSchema=None, + ): + self.name = name + self.newSchema = newSchema + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.name = ISchemaName() + self.name.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.newSchema = ISchema() + self.newSchema.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AlterISchemaRequest") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRUCT, 1) + self.name.write(oprot) + oprot.writeFieldEnd() + if self.newSchema is not None: + oprot.writeFieldBegin("newSchema", TType.STRUCT, 3) + self.newSchema.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SchemaVersion: + """ + Attributes: + - schema + - version + - createdAt + - cols + - state + - description + - schemaText + - fingerprint + - 
name + - serDe + + """ + + def __init__( + self, + schema=None, + version=None, + createdAt=None, + cols=None, + state=None, + description=None, + schemaText=None, + fingerprint=None, + name=None, + serDe=None, + ): + self.schema = schema + self.version = version + self.createdAt = createdAt + self.cols = cols + self.state = state + self.description = description + self.schemaText = schemaText + self.fingerprint = fingerprint + self.name = name + self.serDe = serDe + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.schema = ISchemaName() + self.schema.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.version = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.createdAt = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.cols = [] + (_etype1054, _size1051) = iprot.readListBegin() + for _i1055 in range(_size1051): + _elem1056 = FieldSchema() + _elem1056.read(iprot) + self.cols.append(_elem1056) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.state = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.description = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.schemaText = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.fingerprint = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRUCT: + self.serDe = SerDeInfo() + self.serDe.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SchemaVersion") + if self.schema is not None: + oprot.writeFieldBegin("schema", TType.STRUCT, 1) + self.schema.write(oprot) + oprot.writeFieldEnd() + if self.version is not None: + oprot.writeFieldBegin("version", TType.I32, 2) + oprot.writeI32(self.version) + oprot.writeFieldEnd() + if self.createdAt is not None: + oprot.writeFieldBegin("createdAt", TType.I64, 3) + oprot.writeI64(self.createdAt) + oprot.writeFieldEnd() + if self.cols is not None: + oprot.writeFieldBegin("cols", TType.LIST, 4) + oprot.writeListBegin(TType.STRUCT, len(self.cols)) + for iter1057 in self.cols: + iter1057.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.state is not None: + oprot.writeFieldBegin("state", TType.I32, 5) + oprot.writeI32(self.state) + 
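+ # [Editorial note, not part of the Thrift-generated source: every field write above and below is guarded by an "is not None" check, so unset optional fields are omitted from the serialized struct rather than written as nulls.]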
oprot.writeFieldEnd() + if self.description is not None: + oprot.writeFieldBegin("description", TType.STRING, 6) + oprot.writeString(self.description.encode("utf-8") if sys.version_info[0] == 2 else self.description) + oprot.writeFieldEnd() + if self.schemaText is not None: + oprot.writeFieldBegin("schemaText", TType.STRING, 7) + oprot.writeString(self.schemaText.encode("utf-8") if sys.version_info[0] == 2 else self.schemaText) + oprot.writeFieldEnd() + if self.fingerprint is not None: + oprot.writeFieldBegin("fingerprint", TType.STRING, 8) + oprot.writeString(self.fingerprint.encode("utf-8") if sys.version_info[0] == 2 else self.fingerprint) + oprot.writeFieldEnd() + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 9) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.serDe is not None: + oprot.writeFieldBegin("serDe", TType.STRUCT, 10) + self.serDe.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SchemaVersionDescriptor: + """ + Attributes: + - schema + - version + + """ + + def __init__( + self, + schema=None, + version=None, + ): + self.schema = schema + self.version = version + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.schema = ISchemaName() + self.schema.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.version = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SchemaVersionDescriptor") + if self.schema is not None: + oprot.writeFieldBegin("schema", TType.STRUCT, 1) + self.schema.write(oprot) + oprot.writeFieldEnd() + if self.version is not None: + oprot.writeFieldBegin("version", TType.I32, 2) + oprot.writeI32(self.version) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class FindSchemasByColsRqst: + """ + Attributes: + - colName + - colNamespace + - type + + """ + + def __init__( + self, + colName=None, + colNamespace=None, + type=None, + ): + self.colName = colName + self.colNamespace = colNamespace + self.type = type + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and 
isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.colName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.colNamespace = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.type = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("FindSchemasByColsRqst") + if self.colName is not None: + oprot.writeFieldBegin("colName", TType.STRING, 1) + oprot.writeString(self.colName.encode("utf-8") if sys.version_info[0] == 2 else self.colName) + oprot.writeFieldEnd() + if self.colNamespace is not None: + oprot.writeFieldBegin("colNamespace", TType.STRING, 2) + oprot.writeString(self.colNamespace.encode("utf-8") if sys.version_info[0] == 2 else self.colNamespace) + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.STRING, 3) + oprot.writeString(self.type.encode("utf-8") if sys.version_info[0] == 2 else self.type) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class FindSchemasByColsResp: + """ + Attributes: + - schemaVersions + + """ + + def __init__( + self, + schemaVersions=None, + ): + self.schemaVersions = schemaVersions + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.schemaVersions = [] + (_etype1061, _size1058) = iprot.readListBegin() + for _i1062 in range(_size1058): + _elem1063 = SchemaVersionDescriptor() + _elem1063.read(iprot) + self.schemaVersions.append(_elem1063) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("FindSchemasByColsResp") + if self.schemaVersions is not None: + oprot.writeFieldBegin("schemaVersions", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, 
len(self.schemaVersions)) + for iter1064 in self.schemaVersions: + iter1064.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class MapSchemaVersionToSerdeRequest: + """ + Attributes: + - schemaVersion + - serdeName + + """ + + def __init__( + self, + schemaVersion=None, + serdeName=None, + ): + self.schemaVersion = schemaVersion + self.serdeName = serdeName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.schemaVersion = SchemaVersionDescriptor() + self.schemaVersion.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.serdeName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("MapSchemaVersionToSerdeRequest") + if self.schemaVersion is not None: + oprot.writeFieldBegin("schemaVersion", TType.STRUCT, 1) + self.schemaVersion.write(oprot) + oprot.writeFieldEnd() + if self.serdeName is not None: + oprot.writeFieldBegin("serdeName", TType.STRING, 2) + oprot.writeString(self.serdeName.encode("utf-8") if sys.version_info[0] == 2 else self.serdeName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class SetSchemaVersionStateRequest: + """ + Attributes: + - schemaVersion + - state + + """ + + def __init__( + self, + schemaVersion=None, + state=None, + ): + self.schemaVersion = schemaVersion + self.state = state + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.schemaVersion = SchemaVersionDescriptor() + self.schemaVersion.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.state = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def 
write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("SetSchemaVersionStateRequest") + if self.schemaVersion is not None: + oprot.writeFieldBegin("schemaVersion", TType.STRUCT, 1) + self.schemaVersion.write(oprot) + oprot.writeFieldEnd() + if self.state is not None: + oprot.writeFieldBegin("state", TType.I32, 2) + oprot.writeI32(self.state) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetSerdeRequest: + """ + Attributes: + - serdeName + + """ + + def __init__( + self, + serdeName=None, + ): + self.serdeName = serdeName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.serdeName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetSerdeRequest") + if self.serdeName is not None: + oprot.writeFieldBegin("serdeName", TType.STRING, 1) + oprot.writeString(self.serdeName.encode("utf-8") if sys.version_info[0] == 2 else self.serdeName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class RuntimeStat: + """ + Attributes: + - createTime + - weight + - payload + + """ + + def __init__( + self, + createTime=None, + weight=None, + payload=None, + ): + self.createTime = createTime + self.weight = weight + self.payload = payload + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.createTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.weight = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.payload = iprot.readBinary() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + 
iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("RuntimeStat") + if self.createTime is not None: + oprot.writeFieldBegin("createTime", TType.I32, 1) + oprot.writeI32(self.createTime) + oprot.writeFieldEnd() + if self.weight is not None: + oprot.writeFieldBegin("weight", TType.I32, 2) + oprot.writeI32(self.weight) + oprot.writeFieldEnd() + if self.payload is not None: + oprot.writeFieldBegin("payload", TType.STRING, 3) + oprot.writeBinary(self.payload) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.weight is None: + raise TProtocolException(message="Required field weight is unset!") + if self.payload is None: + raise TProtocolException(message="Required field payload is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetRuntimeStatsRequest: + """ + Attributes: + - maxWeight + - maxCreateTime + + """ + + def __init__( + self, + maxWeight=None, + maxCreateTime=None, + ): + self.maxWeight = maxWeight + self.maxCreateTime = maxCreateTime + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.maxWeight = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.maxCreateTime = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetRuntimeStatsRequest") + if self.maxWeight is not None: + oprot.writeFieldBegin("maxWeight", TType.I32, 1) + oprot.writeI32(self.maxWeight) + oprot.writeFieldEnd() + if self.maxCreateTime is not None: + oprot.writeFieldBegin("maxCreateTime", TType.I32, 2) + oprot.writeI32(self.maxCreateTime) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.maxWeight is None: + raise TProtocolException(message="Required field maxWeight is unset!") + if self.maxCreateTime is None: + raise TProtocolException(message="Required field maxCreateTime is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CreateTableRequest: + """ + Attributes: + - table + - envContext + - primaryKeys + - foreignKeys + - uniqueConstraints + - notNullConstraints + - defaultConstraints + - checkConstraints + - 
processorCapabilities + - processorIdentifier + + """ + + def __init__( + self, + table=None, + envContext=None, + primaryKeys=None, + foreignKeys=None, + uniqueConstraints=None, + notNullConstraints=None, + defaultConstraints=None, + checkConstraints=None, + processorCapabilities=None, + processorIdentifier=None, + ): + self.table = table + self.envContext = envContext + self.primaryKeys = primaryKeys + self.foreignKeys = foreignKeys + self.uniqueConstraints = uniqueConstraints + self.notNullConstraints = notNullConstraints + self.defaultConstraints = defaultConstraints + self.checkConstraints = checkConstraints + self.processorCapabilities = processorCapabilities + self.processorIdentifier = processorIdentifier + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.table = Table() + self.table.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.envContext = EnvironmentContext() + self.envContext.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.primaryKeys = [] + (_etype1068, _size1065) = iprot.readListBegin() + for _i1069 in range(_size1065): + _elem1070 = SQLPrimaryKey() + _elem1070.read(iprot) + self.primaryKeys.append(_elem1070) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.foreignKeys = [] + (_etype1074, _size1071) = iprot.readListBegin() + for _i1075 in range(_size1071): + _elem1076 = SQLForeignKey() + _elem1076.read(iprot) + self.foreignKeys.append(_elem1076) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.uniqueConstraints = [] + (_etype1080, _size1077) = iprot.readListBegin() + for _i1081 in range(_size1077): + _elem1082 = SQLUniqueConstraint() + _elem1082.read(iprot) + self.uniqueConstraints.append(_elem1082) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.notNullConstraints = [] + (_etype1086, _size1083) = iprot.readListBegin() + for _i1087 in range(_size1083): + _elem1088 = SQLNotNullConstraint() + _elem1088.read(iprot) + self.notNullConstraints.append(_elem1088) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.LIST: + self.defaultConstraints = [] + (_etype1092, _size1089) = iprot.readListBegin() + for _i1093 in range(_size1089): + _elem1094 = SQLDefaultConstraint() + _elem1094.read(iprot) + self.defaultConstraints.append(_elem1094) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.LIST: + self.checkConstraints = [] + (_etype1098, _size1095) = iprot.readListBegin() + for _i1099 in range(_size1095): + _elem1100 = SQLCheckConstraint() + _elem1100.read(iprot) + self.checkConstraints.append(_elem1100) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.LIST: + self.processorCapabilities = [] + (_etype1104, _size1101) = iprot.readListBegin() + for _i1105 in range(_size1101): + _elem1106 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.processorCapabilities.append(_elem1106) + iprot.readListEnd() + else: + 
iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.processorIdentifier = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CreateTableRequest") + if self.table is not None: + oprot.writeFieldBegin("table", TType.STRUCT, 1) + self.table.write(oprot) + oprot.writeFieldEnd() + if self.envContext is not None: + oprot.writeFieldBegin("envContext", TType.STRUCT, 2) + self.envContext.write(oprot) + oprot.writeFieldEnd() + if self.primaryKeys is not None: + oprot.writeFieldBegin("primaryKeys", TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) + for iter1107 in self.primaryKeys: + iter1107.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.foreignKeys is not None: + oprot.writeFieldBegin("foreignKeys", TType.LIST, 4) + oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) + for iter1108 in self.foreignKeys: + iter1108.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.uniqueConstraints is not None: + oprot.writeFieldBegin("uniqueConstraints", TType.LIST, 5) + oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) + for iter1109 in self.uniqueConstraints: + iter1109.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.notNullConstraints is not None: + oprot.writeFieldBegin("notNullConstraints", TType.LIST, 6) + oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) + for iter1110 in self.notNullConstraints: + iter1110.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.defaultConstraints is not None: + oprot.writeFieldBegin("defaultConstraints", TType.LIST, 7) + oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) + for iter1111 in self.defaultConstraints: + iter1111.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.checkConstraints is not None: + oprot.writeFieldBegin("checkConstraints", TType.LIST, 8) + oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) + for iter1112 in self.checkConstraints: + iter1112.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.processorCapabilities is not None: + oprot.writeFieldBegin("processorCapabilities", TType.LIST, 9) + oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) + for iter1113 in self.processorCapabilities: + oprot.writeString(iter1113.encode("utf-8") if sys.version_info[0] == 2 else iter1113) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.processorIdentifier is not None: + oprot.writeFieldBegin("processorIdentifier", TType.STRING, 10) + oprot.writeString(self.processorIdentifier.encode("utf-8") if sys.version_info[0] == 2 else self.processorIdentifier) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.table is None: + raise TProtocolException(message="Required field table is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return 
not (self == other) + + +class CreateDatabaseRequest: + """ + Attributes: + - databaseName + - description + - locationUri + - parameters + - privileges + - ownerName + - ownerType + - catalogName + - createTime + - managedLocationUri + - type + - dataConnectorName + + """ + + def __init__( + self, + databaseName=None, + description=None, + locationUri=None, + parameters=None, + privileges=None, + ownerName=None, + ownerType=None, + catalogName=None, + createTime=None, + managedLocationUri=None, + type=None, + dataConnectorName=None, + ): + self.databaseName = databaseName + self.description = description + self.locationUri = locationUri + self.parameters = parameters + self.privileges = privileges + self.ownerName = ownerName + self.ownerType = ownerType + self.catalogName = catalogName + self.createTime = createTime + self.managedLocationUri = managedLocationUri + self.type = type + self.dataConnectorName = dataConnectorName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.databaseName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.description = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.locationUri = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.MAP: + self.parameters = {} + (_ktype1115, _vtype1116, _size1114) = iprot.readMapBegin() + for _i1118 in range(_size1114): + _key1119 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + _val1120 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.parameters[_key1119] = _val1120 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.privileges = PrincipalPrivilegeSet() + self.privileges.read(iprot) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.ownerName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.ownerType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.catalogName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.I32: + self.createTime = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.managedLocationUri = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.STRING: + self.type = ( + iprot.readString().decode("utf-8", errors="replace") if 
sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 12: + if ftype == TType.STRING: + self.dataConnectorName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CreateDatabaseRequest") + if self.databaseName is not None: + oprot.writeFieldBegin("databaseName", TType.STRING, 1) + oprot.writeString(self.databaseName.encode("utf-8") if sys.version_info[0] == 2 else self.databaseName) + oprot.writeFieldEnd() + if self.description is not None: + oprot.writeFieldBegin("description", TType.STRING, 2) + oprot.writeString(self.description.encode("utf-8") if sys.version_info[0] == 2 else self.description) + oprot.writeFieldEnd() + if self.locationUri is not None: + oprot.writeFieldBegin("locationUri", TType.STRING, 3) + oprot.writeString(self.locationUri.encode("utf-8") if sys.version_info[0] == 2 else self.locationUri) + oprot.writeFieldEnd() + if self.parameters is not None: + oprot.writeFieldBegin("parameters", TType.MAP, 4) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) + for kiter1121, viter1122 in self.parameters.items(): + oprot.writeString(kiter1121.encode("utf-8") if sys.version_info[0] == 2 else kiter1121) + oprot.writeString(viter1122.encode("utf-8") if sys.version_info[0] == 2 else viter1122) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.privileges is not None: + oprot.writeFieldBegin("privileges", TType.STRUCT, 5) + self.privileges.write(oprot) + oprot.writeFieldEnd() + if self.ownerName is not None: + oprot.writeFieldBegin("ownerName", TType.STRING, 6) + oprot.writeString(self.ownerName.encode("utf-8") if sys.version_info[0] == 2 else self.ownerName) + oprot.writeFieldEnd() + if self.ownerType is not None: + oprot.writeFieldBegin("ownerType", TType.I32, 7) + oprot.writeI32(self.ownerType) + oprot.writeFieldEnd() + if self.catalogName is not None: + oprot.writeFieldBegin("catalogName", TType.STRING, 8) + oprot.writeString(self.catalogName.encode("utf-8") if sys.version_info[0] == 2 else self.catalogName) + oprot.writeFieldEnd() + if self.createTime is not None: + oprot.writeFieldBegin("createTime", TType.I32, 9) + oprot.writeI32(self.createTime) + oprot.writeFieldEnd() + if self.managedLocationUri is not None: + oprot.writeFieldBegin("managedLocationUri", TType.STRING, 10) + oprot.writeString(self.managedLocationUri.encode("utf-8") if sys.version_info[0] == 2 else self.managedLocationUri) + oprot.writeFieldEnd() + if self.type is not None: + oprot.writeFieldBegin("type", TType.STRING, 11) + oprot.writeString(self.type.encode("utf-8") if sys.version_info[0] == 2 else self.type) + oprot.writeFieldEnd() + if self.dataConnectorName is not None: + oprot.writeFieldBegin("dataConnectorName", TType.STRING, 12) + oprot.writeString(self.dataConnectorName.encode("utf-8") if sys.version_info[0] == 2 else self.dataConnectorName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.databaseName is None: + raise TProtocolException(message="Required field databaseName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return 
"{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class CreateDataConnectorRequest: + """ + Attributes: + - connector + + """ + + def __init__( + self, + connector=None, + ): + self.connector = connector + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.connector = DataConnector() + self.connector.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("CreateDataConnectorRequest") + if self.connector is not None: + oprot.writeFieldBegin("connector", TType.STRUCT, 1) + self.connector.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetDataConnectorRequest: + """ + Attributes: + - connectorName + + """ + + def __init__( + self, + connectorName=None, + ): + self.connectorName = connectorName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.connectorName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetDataConnectorRequest") + if self.connectorName is not None: + oprot.writeFieldBegin("connectorName", TType.STRING, 1) + oprot.writeString(self.connectorName.encode("utf-8") if sys.version_info[0] == 2 else self.connectorName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.connectorName is None: + raise TProtocolException(message="Required field connectorName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not 
(self == other) + + +class ScheduledQueryPollRequest: + """ + Attributes: + - clusterNamespace + + """ + + def __init__( + self, + clusterNamespace=None, + ): + self.clusterNamespace = clusterNamespace + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.clusterNamespace = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ScheduledQueryPollRequest") + if self.clusterNamespace is not None: + oprot.writeFieldBegin("clusterNamespace", TType.STRING, 1) + oprot.writeString(self.clusterNamespace.encode("utf-8") if sys.version_info[0] == 2 else self.clusterNamespace) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.clusterNamespace is None: + raise TProtocolException(message="Required field clusterNamespace is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ScheduledQueryKey: + """ + Attributes: + - scheduleName + - clusterNamespace + + """ + + def __init__( + self, + scheduleName=None, + clusterNamespace=None, + ): + self.scheduleName = scheduleName + self.clusterNamespace = clusterNamespace + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.scheduleName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.clusterNamespace = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ScheduledQueryKey") + if self.scheduleName is not None: + oprot.writeFieldBegin("scheduleName", TType.STRING, 1) + oprot.writeString(self.scheduleName.encode("utf-8") if sys.version_info[0] == 2 else self.scheduleName) + oprot.writeFieldEnd() + if self.clusterNamespace is not None: + oprot.writeFieldBegin("clusterNamespace", TType.STRING, 2) + 
oprot.writeString(self.clusterNamespace.encode("utf-8") if sys.version_info[0] == 2 else self.clusterNamespace) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.scheduleName is None: + raise TProtocolException(message="Required field scheduleName is unset!") + if self.clusterNamespace is None: + raise TProtocolException(message="Required field clusterNamespace is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ScheduledQueryPollResponse: + """ + Attributes: + - scheduleKey + - executionId + - query + - user + + """ + + def __init__( + self, + scheduleKey=None, + executionId=None, + query=None, + user=None, + ): + self.scheduleKey = scheduleKey + self.executionId = executionId + self.query = query + self.user = user + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.scheduleKey = ScheduledQueryKey() + self.scheduleKey.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.executionId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.query = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.user = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ScheduledQueryPollResponse") + if self.scheduleKey is not None: + oprot.writeFieldBegin("scheduleKey", TType.STRUCT, 1) + self.scheduleKey.write(oprot) + oprot.writeFieldEnd() + if self.executionId is not None: + oprot.writeFieldBegin("executionId", TType.I64, 2) + oprot.writeI64(self.executionId) + oprot.writeFieldEnd() + if self.query is not None: + oprot.writeFieldBegin("query", TType.STRING, 3) + oprot.writeString(self.query.encode("utf-8") if sys.version_info[0] == 2 else self.query) + oprot.writeFieldEnd() + if self.user is not None: + oprot.writeFieldBegin("user", TType.STRING, 4) + oprot.writeString(self.user.encode("utf-8") if sys.version_info[0] == 2 else self.user) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ScheduledQuery: 
+ """ + Attributes: + - scheduleKey + - enabled + - schedule + - user + - query + - nextExecution + + """ + + def __init__( + self, + scheduleKey=None, + enabled=None, + schedule=None, + user=None, + query=None, + nextExecution=None, + ): + self.scheduleKey = scheduleKey + self.enabled = enabled + self.schedule = schedule + self.user = user + self.query = query + self.nextExecution = nextExecution + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.scheduleKey = ScheduledQueryKey() + self.scheduleKey.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.enabled = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.schedule = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.user = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.query = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.nextExecution = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ScheduledQuery") + if self.scheduleKey is not None: + oprot.writeFieldBegin("scheduleKey", TType.STRUCT, 1) + self.scheduleKey.write(oprot) + oprot.writeFieldEnd() + if self.enabled is not None: + oprot.writeFieldBegin("enabled", TType.BOOL, 2) + oprot.writeBool(self.enabled) + oprot.writeFieldEnd() + if self.schedule is not None: + oprot.writeFieldBegin("schedule", TType.STRING, 4) + oprot.writeString(self.schedule.encode("utf-8") if sys.version_info[0] == 2 else self.schedule) + oprot.writeFieldEnd() + if self.user is not None: + oprot.writeFieldBegin("user", TType.STRING, 5) + oprot.writeString(self.user.encode("utf-8") if sys.version_info[0] == 2 else self.user) + oprot.writeFieldEnd() + if self.query is not None: + oprot.writeFieldBegin("query", TType.STRING, 6) + oprot.writeString(self.query.encode("utf-8") if sys.version_info[0] == 2 else self.query) + oprot.writeFieldEnd() + if self.nextExecution is not None: + oprot.writeFieldBegin("nextExecution", TType.I32, 7) + oprot.writeI32(self.nextExecution) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.scheduleKey is None: + raise TProtocolException(message="Required field scheduleKey is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, 
other): + return not (self == other) + + +class ScheduledQueryMaintenanceRequest: + """ + Attributes: + - type + - scheduledQuery + + """ + + def __init__( + self, + type=None, + scheduledQuery=None, + ): + self.type = type + self.scheduledQuery = scheduledQuery + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.type = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.scheduledQuery = ScheduledQuery() + self.scheduledQuery.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ScheduledQueryMaintenanceRequest") + if self.type is not None: + oprot.writeFieldBegin("type", TType.I32, 1) + oprot.writeI32(self.type) + oprot.writeFieldEnd() + if self.scheduledQuery is not None: + oprot.writeFieldBegin("scheduledQuery", TType.STRUCT, 2) + self.scheduledQuery.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.type is None: + raise TProtocolException(message="Required field type is unset!") + if self.scheduledQuery is None: + raise TProtocolException(message="Required field scheduledQuery is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ScheduledQueryProgressInfo: + """ + Attributes: + - scheduledExecutionId + - state + - executorQueryId + - errorMessage + + """ + + def __init__( + self, + scheduledExecutionId=None, + state=None, + executorQueryId=None, + errorMessage=None, + ): + self.scheduledExecutionId = scheduledExecutionId + self.state = state + self.executorQueryId = executorQueryId + self.errorMessage = errorMessage + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.scheduledExecutionId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.state = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.executorQueryId = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.errorMessage = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + 
iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ScheduledQueryProgressInfo") + if self.scheduledExecutionId is not None: + oprot.writeFieldBegin("scheduledExecutionId", TType.I64, 1) + oprot.writeI64(self.scheduledExecutionId) + oprot.writeFieldEnd() + if self.state is not None: + oprot.writeFieldBegin("state", TType.I32, 2) + oprot.writeI32(self.state) + oprot.writeFieldEnd() + if self.executorQueryId is not None: + oprot.writeFieldBegin("executorQueryId", TType.STRING, 3) + oprot.writeString(self.executorQueryId.encode("utf-8") if sys.version_info[0] == 2 else self.executorQueryId) + oprot.writeFieldEnd() + if self.errorMessage is not None: + oprot.writeFieldBegin("errorMessage", TType.STRING, 4) + oprot.writeString(self.errorMessage.encode("utf-8") if sys.version_info[0] == 2 else self.errorMessage) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.scheduledExecutionId is None: + raise TProtocolException(message="Required field scheduledExecutionId is unset!") + if self.state is None: + raise TProtocolException(message="Required field state is unset!") + if self.executorQueryId is None: + raise TProtocolException(message="Required field executorQueryId is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AlterPartitionsRequest: + """ + Attributes: + - catName + - dbName + - tableName + - partitions + - environmentContext + - writeId + - validWriteIdList + + """ + + def __init__( + self, + catName=None, + dbName=None, + tableName=None, + partitions=None, + environmentContext=None, + writeId=-1, + validWriteIdList=None, + ): + self.catName = catName + self.dbName = dbName + self.tableName = tableName + self.partitions = partitions + self.environmentContext = environmentContext + self.writeId = writeId + self.validWriteIdList = validWriteIdList + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.partitions = [] + (_etype1126, _size1123) = iprot.readListBegin() + for _i1127 in range(_size1123): + _elem1128 = Partition() + _elem1128.read(iprot) + 
self.partitions.append(_elem1128) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.environmentContext = EnvironmentContext() + self.environmentContext.read(iprot) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AlterPartitionsRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 3) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.partitions is not None: + oprot.writeFieldBegin("partitions", TType.LIST, 4) + oprot.writeListBegin(TType.STRUCT, len(self.partitions)) + for iter1129 in self.partitions: + iter1129.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.environmentContext is not None: + oprot.writeFieldBegin("environmentContext", TType.STRUCT, 5) + self.environmentContext.write(oprot) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin("writeId", TType.I64, 6) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 7) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tableName is None: + raise TProtocolException(message="Required field tableName is unset!") + if self.partitions is None: + raise TProtocolException(message="Required field partitions is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AlterPartitionsResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and 
self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AlterPartitionsResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class RenamePartitionRequest: + """ + Attributes: + - catName + - dbName + - tableName + - partVals + - newPart + - validWriteIdList + - txnId + - clonePart + + """ + + def __init__( + self, + catName=None, + dbName=None, + tableName=None, + partVals=None, + newPart=None, + validWriteIdList=None, + txnId=None, + clonePart=None, + ): + self.catName = catName + self.dbName = dbName + self.tableName = tableName + self.partVals = partVals + self.newPart = newPart + self.validWriteIdList = validWriteIdList + self.txnId = txnId + self.clonePart = clonePart + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.partVals = [] + (_etype1133, _size1130) = iprot.readListBegin() + for _i1134 in range(_size1130): + _elem1135 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partVals.append(_elem1135) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.newPart = Partition() + self.newPart.read(iprot) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.clonePart = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("RenamePartitionRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() 
+ if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 3) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + if self.partVals is not None: + oprot.writeFieldBegin("partVals", TType.LIST, 4) + oprot.writeListBegin(TType.STRING, len(self.partVals)) + for iter1136 in self.partVals: + oprot.writeString(iter1136.encode("utf-8") if sys.version_info[0] == 2 else iter1136) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.newPart is not None: + oprot.writeFieldBegin("newPart", TType.STRUCT, 5) + self.newPart.write(oprot) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 6) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.txnId is not None: + oprot.writeFieldBegin("txnId", TType.I64, 7) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.clonePart is not None: + oprot.writeFieldBegin("clonePart", TType.BOOL, 8) + oprot.writeBool(self.clonePart) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tableName is None: + raise TProtocolException(message="Required field tableName is unset!") + if self.partVals is None: + raise TProtocolException(message="Required field partVals is unset!") + if self.newPart is None: + raise TProtocolException(message="Required field newPart is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class RenamePartitionResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("RenamePartitionResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AlterTableRequest: + """ + Attributes: + - catName + - dbName + - tableName + - table + - environmentContext + - writeId + - validWriteIdList + - processorCapabilities + - processorIdentifier + + """ + + def __init__( + self, + 
catName=None, + dbName=None, + tableName=None, + table=None, + environmentContext=None, + writeId=-1, + validWriteIdList=None, + processorCapabilities=None, + processorIdentifier=None, + ): + self.catName = catName + self.dbName = dbName + self.tableName = tableName + self.table = table + self.environmentContext = environmentContext + self.writeId = writeId + self.validWriteIdList = validWriteIdList + self.processorCapabilities = processorCapabilities + self.processorIdentifier = processorIdentifier + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.table = Table() + self.table.read(iprot) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.environmentContext = EnvironmentContext() + self.environmentContext.read(iprot) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.LIST: + self.processorCapabilities = [] + (_etype1140, _size1137) = iprot.readListBegin() + for _i1141 in range(_size1137): + _elem1142 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.processorCapabilities.append(_elem1142) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.processorIdentifier = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AlterTableRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 3) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] 
== 2 else self.tableName) + oprot.writeFieldEnd() + if self.table is not None: + oprot.writeFieldBegin("table", TType.STRUCT, 4) + self.table.write(oprot) + oprot.writeFieldEnd() + if self.environmentContext is not None: + oprot.writeFieldBegin("environmentContext", TType.STRUCT, 5) + self.environmentContext.write(oprot) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin("writeId", TType.I64, 6) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 7) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.processorCapabilities is not None: + oprot.writeFieldBegin("processorCapabilities", TType.LIST, 8) + oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) + for iter1143 in self.processorCapabilities: + oprot.writeString(iter1143.encode("utf-8") if sys.version_info[0] == 2 else iter1143) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.processorIdentifier is not None: + oprot.writeFieldBegin("processorIdentifier", TType.STRING, 9) + oprot.writeString(self.processorIdentifier.encode("utf-8") if sys.version_info[0] == 2 else self.processorIdentifier) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tableName is None: + raise TProtocolException(message="Required field tableName is unset!") + if self.table is None: + raise TProtocolException(message="Required field table is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AlterTableResponse: + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AlterTableResponse") + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPartitionsFilterSpec: + """ + Attributes: + - filterMode + - filters + + """ + + def __init__( + self, + filterMode=None, + filters=None, + ): + self.filterMode = filterMode + self.filters = filters + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + 
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 7: + if ftype == TType.I32: + self.filterMode = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.LIST: + self.filters = [] + (_etype1147, _size1144) = iprot.readListBegin() + for _i1148 in range(_size1144): + _elem1149 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.filters.append(_elem1149) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPartitionsFilterSpec") + if self.filterMode is not None: + oprot.writeFieldBegin("filterMode", TType.I32, 7) + oprot.writeI32(self.filterMode) + oprot.writeFieldEnd() + if self.filters is not None: + oprot.writeFieldBegin("filters", TType.LIST, 8) + oprot.writeListBegin(TType.STRING, len(self.filters)) + for iter1150 in self.filters: + oprot.writeString(iter1150.encode("utf-8") if sys.version_info[0] == 2 else iter1150) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPartitionsResponse: + """ + Attributes: + - partitionSpec + + """ + + def __init__( + self, + partitionSpec=None, + ): + self.partitionSpec = partitionSpec + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.partitionSpec = [] + (_etype1154, _size1151) = iprot.readListBegin() + for _i1155 in range(_size1151): + _elem1156 = PartitionSpec() + _elem1156.read(iprot) + self.partitionSpec.append(_elem1156) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPartitionsResponse") + if self.partitionSpec is not None: + oprot.writeFieldBegin("partitionSpec", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.partitionSpec)) + for iter1157 in self.partitionSpec: + iter1157.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, 
self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPartitionsRequest: + """ + Attributes: + - catName + - dbName + - tblName + - withAuth + - user + - groupNames + - projectionSpec + - filterSpec + - processorCapabilities + - processorIdentifier + - validWriteIdList + + """ + + def __init__( + self, + catName=None, + dbName=None, + tblName=None, + withAuth=None, + user=None, + groupNames=None, + projectionSpec=None, + filterSpec=None, + processorCapabilities=None, + processorIdentifier=None, + validWriteIdList=None, + ): + self.catName = catName + self.dbName = dbName + self.tblName = tblName + self.withAuth = withAuth + self.user = user + self.groupNames = groupNames + self.projectionSpec = projectionSpec + self.filterSpec = filterSpec + self.processorCapabilities = processorCapabilities + self.processorIdentifier = processorIdentifier + self.validWriteIdList = validWriteIdList + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.withAuth = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.user = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.groupNames = [] + (_etype1161, _size1158) = iprot.readListBegin() + for _i1162 in range(_size1158): + _elem1163 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.groupNames.append(_elem1163) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRUCT: + self.projectionSpec = GetProjectionsSpec() + self.projectionSpec.read(iprot) + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRUCT: + self.filterSpec = GetPartitionsFilterSpec() + self.filterSpec.read(iprot) + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.LIST: + self.processorCapabilities = [] + (_etype1167, _size1164) = iprot.readListBegin() + for _i1168 in range(_size1164): + _elem1169 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.processorCapabilities.append(_elem1169) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.processorIdentifier = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == 
TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPartitionsRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 3) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.withAuth is not None: + oprot.writeFieldBegin("withAuth", TType.BOOL, 4) + oprot.writeBool(self.withAuth) + oprot.writeFieldEnd() + if self.user is not None: + oprot.writeFieldBegin("user", TType.STRING, 5) + oprot.writeString(self.user.encode("utf-8") if sys.version_info[0] == 2 else self.user) + oprot.writeFieldEnd() + if self.groupNames is not None: + oprot.writeFieldBegin("groupNames", TType.LIST, 6) + oprot.writeListBegin(TType.STRING, len(self.groupNames)) + for iter1170 in self.groupNames: + oprot.writeString(iter1170.encode("utf-8") if sys.version_info[0] == 2 else iter1170) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.projectionSpec is not None: + oprot.writeFieldBegin("projectionSpec", TType.STRUCT, 7) + self.projectionSpec.write(oprot) + oprot.writeFieldEnd() + if self.filterSpec is not None: + oprot.writeFieldBegin("filterSpec", TType.STRUCT, 8) + self.filterSpec.write(oprot) + oprot.writeFieldEnd() + if self.processorCapabilities is not None: + oprot.writeFieldBegin("processorCapabilities", TType.LIST, 9) + oprot.writeListBegin(TType.STRING, len(self.processorCapabilities)) + for iter1171 in self.processorCapabilities: + oprot.writeString(iter1171.encode("utf-8") if sys.version_info[0] == 2 else iter1171) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.processorIdentifier is not None: + oprot.writeFieldBegin("processorIdentifier", TType.STRING, 10) + oprot.writeString(self.processorIdentifier.encode("utf-8") if sys.version_info[0] == 2 else self.processorIdentifier) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 11) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetFieldsRequest: + """ + Attributes: + - catName + - dbName + - tblName + - envContext + - validWriteIdList + - id + + """ + + def __init__( + self, + catName=None, + dbName=None, + tblName=None, + envContext=None, + 
validWriteIdList=None, + id=-1, + ): + self.catName = catName + self.dbName = dbName + self.tblName = tblName + self.envContext = envContext + self.validWriteIdList = validWriteIdList + self.id = id + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.envContext = EnvironmentContext() + self.envContext.read(iprot) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetFieldsRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 3) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.envContext is not None: + oprot.writeFieldBegin("envContext", TType.STRUCT, 4) + self.envContext.write(oprot) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 5) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 6) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, 
self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetFieldsResponse: + """ + Attributes: + - fields + + """ + + def __init__( + self, + fields=None, + ): + self.fields = fields + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fields = [] + (_etype1175, _size1172) = iprot.readListBegin() + for _i1176 in range(_size1172): + _elem1177 = FieldSchema() + _elem1177.read(iprot) + self.fields.append(_elem1177) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetFieldsResponse") + if self.fields is not None: + oprot.writeFieldBegin("fields", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.fields)) + for iter1178 in self.fields: + iter1178.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fields is None: + raise TProtocolException(message="Required field fields is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetSchemaRequest: + """ + Attributes: + - catName + - dbName + - tblName + - envContext + - validWriteIdList + - id + + """ + + def __init__( + self, + catName=None, + dbName=None, + tblName=None, + envContext=None, + validWriteIdList=None, + id=-1, + ): + self.catName = catName + self.dbName = dbName + self.tblName = tblName + self.envContext = envContext + self.validWriteIdList = validWriteIdList + self.id = id + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.envContext = EnvironmentContext() + self.envContext.read(iprot) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + 
self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetSchemaRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 3) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.envContext is not None: + oprot.writeFieldBegin("envContext", TType.STRUCT, 4) + self.envContext.write(oprot) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 5) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 6) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetSchemaResponse: + """ + Attributes: + - fields + + """ + + def __init__( + self, + fields=None, + ): + self.fields = fields + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fields = [] + (_etype1182, _size1179) = iprot.readListBegin() + for _i1183 in range(_size1179): + _elem1184 = FieldSchema() + _elem1184.read(iprot) + self.fields.append(_elem1184) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetSchemaResponse") + if self.fields is not None: + oprot.writeFieldBegin("fields", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.fields)) + for iter1185 in self.fields: + 
iter1185.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fields is None: + raise TProtocolException(message="Required field fields is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPartitionRequest: + """ + Attributes: + - catName + - dbName + - tblName + - partVals + - validWriteIdList + - id + + """ + + def __init__( + self, + catName=None, + dbName=None, + tblName=None, + partVals=None, + validWriteIdList=None, + id=-1, + ): + self.catName = catName + self.dbName = dbName + self.tblName = tblName + self.partVals = partVals + self.validWriteIdList = validWriteIdList + self.id = id + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.partVals = [] + (_etype1189, _size1186) = iprot.readListBegin() + for _i1190 in range(_size1186): + _elem1191 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partVals.append(_elem1191) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPartitionRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 3) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + 
oprot.writeFieldEnd() + if self.partVals is not None: + oprot.writeFieldBegin("partVals", TType.LIST, 4) + oprot.writeListBegin(TType.STRING, len(self.partVals)) + for iter1192 in self.partVals: + oprot.writeString(iter1192.encode("utf-8") if sys.version_info[0] == 2 else iter1192) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 5) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 6) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + if self.partVals is None: + raise TProtocolException(message="Required field partVals is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPartitionResponse: + """ + Attributes: + - partition + + """ + + def __init__( + self, + partition=None, + ): + self.partition = partition + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.partition = Partition() + self.partition.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPartitionResponse") + if self.partition is not None: + oprot.writeFieldBegin("partition", TType.STRUCT, 1) + self.partition.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.partition is None: + raise TProtocolException(message="Required field partition is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionsRequest: + """ + Attributes: + - catName + - dbName + - tblName + - maxParts + - validWriteIdList + - id + + """ + + def __init__( + self, + catName=None, + dbName=None, + tblName=None, + maxParts=-1, + validWriteIdList=None, + id=-1, + ): + self.catName = catName + self.dbName = dbName + self.tblName = tblName + self.maxParts = maxParts + self.validWriteIdList = validWriteIdList + self.id = id + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, 
TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I16: + self.maxParts = iprot.readI16() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionsRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 3) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.maxParts is not None: + oprot.writeFieldBegin("maxParts", TType.I16, 4) + oprot.writeI16(self.maxParts) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 5) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 6) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class PartitionsResponse: + """ + Attributes: + - partitions + + """ + + def __init__( + self, + partitions=None, + ): + self.partitions = partitions + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and 
isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.partitions = [] + (_etype1196, _size1193) = iprot.readListBegin() + for _i1197 in range(_size1193): + _elem1198 = Partition() + _elem1198.read(iprot) + self.partitions.append(_elem1198) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("PartitionsResponse") + if self.partitions is not None: + oprot.writeFieldBegin("partitions", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.partitions)) + for iter1199 in self.partitions: + iter1199.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.partitions is None: + raise TProtocolException(message="Required field partitions is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPartitionNamesPsRequest: + """ + Attributes: + - catName + - dbName + - tblName + - partValues + - maxParts + - validWriteIdList + - id + + """ + + def __init__( + self, + catName=None, + dbName=None, + tblName=None, + partValues=None, + maxParts=-1, + validWriteIdList=None, + id=-1, + ): + self.catName = catName + self.dbName = dbName + self.tblName = tblName + self.partValues = partValues + self.maxParts = maxParts + self.validWriteIdList = validWriteIdList + self.id = id + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.partValues = [] + (_etype1203, _size1200) = iprot.readListBegin() + for _i1204 in range(_size1200): + _elem1205 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partValues.append(_elem1205) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I16: + self.maxParts = 
iprot.readI16() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPartitionNamesPsRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 3) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.partValues is not None: + oprot.writeFieldBegin("partValues", TType.LIST, 4) + oprot.writeListBegin(TType.STRING, len(self.partValues)) + for iter1206 in self.partValues: + oprot.writeString(iter1206.encode("utf-8") if sys.version_info[0] == 2 else iter1206) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.maxParts is not None: + oprot.writeFieldBegin("maxParts", TType.I16, 5) + oprot.writeI16(self.maxParts) + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 6) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 7) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPartitionNamesPsResponse: + """ + Attributes: + - names + + """ + + def __init__( + self, + names=None, + ): + self.names = names + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.names = [] + (_etype1210, _size1207) = iprot.readListBegin() + for _i1211 in range(_size1207): + _elem1212 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.names.append(_elem1212) + iprot.readListEnd() + else: + 
iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPartitionNamesPsResponse") + if self.names is not None: + oprot.writeFieldBegin("names", TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.names)) + for iter1213 in self.names: + oprot.writeString(iter1213.encode("utf-8") if sys.version_info[0] == 2 else iter1213) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.names is None: + raise TProtocolException(message="Required field names is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPartitionsPsWithAuthRequest: + """ + Attributes: + - catName + - dbName + - tblName + - partVals + - maxParts + - userName + - groupNames + - validWriteIdList + - id + + """ + + def __init__( + self, + catName=None, + dbName=None, + tblName=None, + partVals=None, + maxParts=-1, + userName=None, + groupNames=None, + validWriteIdList=None, + id=-1, + ): + self.catName = catName + self.dbName = dbName + self.tblName = tblName + self.partVals = partVals + self.maxParts = maxParts + self.userName = userName + self.groupNames = groupNames + self.validWriteIdList = validWriteIdList + self.id = id + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tblName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.partVals = [] + (_etype1217, _size1214) = iprot.readListBegin() + for _i1218 in range(_size1214): + _elem1219 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.partVals.append(_elem1219) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I16: + self.maxParts = iprot.readI16() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.userName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.LIST: + self.groupNames = [] + (_etype1223, _size1220) = iprot.readListBegin() + for _i1224 in range(_size1220): + 
_elem1225 = ( + iprot.readString().decode("utf-8", errors="replace") + if sys.version_info[0] == 2 + else iprot.readString() + ) + self.groupNames.append(_elem1225) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.validWriteIdList = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.I64: + self.id = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPartitionsPsWithAuthRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin("tblName", TType.STRING, 3) + oprot.writeString(self.tblName.encode("utf-8") if sys.version_info[0] == 2 else self.tblName) + oprot.writeFieldEnd() + if self.partVals is not None: + oprot.writeFieldBegin("partVals", TType.LIST, 4) + oprot.writeListBegin(TType.STRING, len(self.partVals)) + for iter1226 in self.partVals: + oprot.writeString(iter1226.encode("utf-8") if sys.version_info[0] == 2 else iter1226) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.maxParts is not None: + oprot.writeFieldBegin("maxParts", TType.I16, 5) + oprot.writeI16(self.maxParts) + oprot.writeFieldEnd() + if self.userName is not None: + oprot.writeFieldBegin("userName", TType.STRING, 6) + oprot.writeString(self.userName.encode("utf-8") if sys.version_info[0] == 2 else self.userName) + oprot.writeFieldEnd() + if self.groupNames is not None: + oprot.writeFieldBegin("groupNames", TType.LIST, 7) + oprot.writeListBegin(TType.STRING, len(self.groupNames)) + for iter1227 in self.groupNames: + oprot.writeString(iter1227.encode("utf-8") if sys.version_info[0] == 2 else iter1227) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.validWriteIdList is not None: + oprot.writeFieldBegin("validWriteIdList", TType.STRING, 8) + oprot.writeString(self.validWriteIdList.encode("utf-8") if sys.version_info[0] == 2 else self.validWriteIdList) + oprot.writeFieldEnd() + if self.id is not None: + oprot.writeFieldBegin("id", TType.I64, 9) + oprot.writeI64(self.id) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.tblName is None: + raise TProtocolException(message="Required field tblName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPartitionsPsWithAuthResponse: + """ + Attributes: + - partitions + + """ + + def __init__( + self, + partitions=None, + ): + self.partitions = 
partitions + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.partitions = [] + (_etype1231, _size1228) = iprot.readListBegin() + for _i1232 in range(_size1228): + _elem1233 = Partition() + _elem1233.read(iprot) + self.partitions.append(_elem1233) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPartitionsPsWithAuthResponse") + if self.partitions is not None: + oprot.writeFieldBegin("partitions", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.partitions)) + for iter1234 in self.partitions: + iter1234.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.partitions is None: + raise TProtocolException(message="Required field partitions is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ReplicationMetrics: + """ + Attributes: + - scheduledExecutionId + - policy + - dumpExecutionId + - metadata + - progress + - messageFormat + + """ + + def __init__( + self, + scheduledExecutionId=None, + policy=None, + dumpExecutionId=None, + metadata=None, + progress=None, + messageFormat=None, + ): + self.scheduledExecutionId = scheduledExecutionId + self.policy = policy + self.dumpExecutionId = dumpExecutionId + self.metadata = metadata + self.progress = progress + self.messageFormat = messageFormat + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.scheduledExecutionId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.policy = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.dumpExecutionId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.metadata = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.progress = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.messageFormat = ( + 
iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ReplicationMetrics") + if self.scheduledExecutionId is not None: + oprot.writeFieldBegin("scheduledExecutionId", TType.I64, 1) + oprot.writeI64(self.scheduledExecutionId) + oprot.writeFieldEnd() + if self.policy is not None: + oprot.writeFieldBegin("policy", TType.STRING, 2) + oprot.writeString(self.policy.encode("utf-8") if sys.version_info[0] == 2 else self.policy) + oprot.writeFieldEnd() + if self.dumpExecutionId is not None: + oprot.writeFieldBegin("dumpExecutionId", TType.I64, 3) + oprot.writeI64(self.dumpExecutionId) + oprot.writeFieldEnd() + if self.metadata is not None: + oprot.writeFieldBegin("metadata", TType.STRING, 4) + oprot.writeString(self.metadata.encode("utf-8") if sys.version_info[0] == 2 else self.metadata) + oprot.writeFieldEnd() + if self.progress is not None: + oprot.writeFieldBegin("progress", TType.STRING, 5) + oprot.writeString(self.progress.encode("utf-8") if sys.version_info[0] == 2 else self.progress) + oprot.writeFieldEnd() + if self.messageFormat is not None: + oprot.writeFieldBegin("messageFormat", TType.STRING, 6) + oprot.writeString(self.messageFormat.encode("utf-8") if sys.version_info[0] == 2 else self.messageFormat) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.scheduledExecutionId is None: + raise TProtocolException(message="Required field scheduledExecutionId is unset!") + if self.policy is None: + raise TProtocolException(message="Required field policy is unset!") + if self.dumpExecutionId is None: + raise TProtocolException(message="Required field dumpExecutionId is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ReplicationMetricList: + """ + Attributes: + - replicationMetricList + + """ + + def __init__( + self, + replicationMetricList=None, + ): + self.replicationMetricList = replicationMetricList + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.replicationMetricList = [] + (_etype1238, _size1235) = iprot.readListBegin() + for _i1239 in range(_size1235): + _elem1240 = ReplicationMetrics() + _elem1240.read(iprot) + self.replicationMetricList.append(_elem1240) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + 
oprot.writeStructBegin("ReplicationMetricList") + if self.replicationMetricList is not None: + oprot.writeFieldBegin("replicationMetricList", TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.replicationMetricList)) + for iter1241 in self.replicationMetricList: + iter1241.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.replicationMetricList is None: + raise TProtocolException(message="Required field replicationMetricList is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetReplicationMetricsRequest: + """ + Attributes: + - scheduledExecutionId + - policy + - dumpExecutionId + + """ + + def __init__( + self, + scheduledExecutionId=None, + policy=None, + dumpExecutionId=None, + ): + self.scheduledExecutionId = scheduledExecutionId + self.policy = policy + self.dumpExecutionId = dumpExecutionId + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.scheduledExecutionId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.policy = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.dumpExecutionId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetReplicationMetricsRequest") + if self.scheduledExecutionId is not None: + oprot.writeFieldBegin("scheduledExecutionId", TType.I64, 1) + oprot.writeI64(self.scheduledExecutionId) + oprot.writeFieldEnd() + if self.policy is not None: + oprot.writeFieldBegin("policy", TType.STRING, 2) + oprot.writeString(self.policy.encode("utf-8") if sys.version_info[0] == 2 else self.policy) + oprot.writeFieldEnd() + if self.dumpExecutionId is not None: + oprot.writeFieldBegin("dumpExecutionId", TType.I64, 3) + oprot.writeI64(self.dumpExecutionId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetOpenTxnsRequest: + """ + Attributes: + - excludeTxnTypes + + """ + + def __init__( + self, + excludeTxnTypes=None, + ): + self.excludeTxnTypes = excludeTxnTypes + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and 
isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.excludeTxnTypes = [] + (_etype1245, _size1242) = iprot.readListBegin() + for _i1246 in range(_size1242): + _elem1247 = iprot.readI32() + self.excludeTxnTypes.append(_elem1247) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetOpenTxnsRequest") + if self.excludeTxnTypes is not None: + oprot.writeFieldBegin("excludeTxnTypes", TType.LIST, 1) + oprot.writeListBegin(TType.I32, len(self.excludeTxnTypes)) + for iter1248 in self.excludeTxnTypes: + oprot.writeI32(iter1248) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class StoredProcedureRequest: + """ + Attributes: + - catName + - dbName + - procName + + """ + + def __init__( + self, + catName=None, + dbName=None, + procName=None, + ): + self.catName = catName + self.dbName = dbName + self.procName = procName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.procName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("StoredProcedureRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.procName is not None: + 
oprot.writeFieldBegin("procName", TType.STRING, 3) + oprot.writeString(self.procName.encode("utf-8") if sys.version_info[0] == 2 else self.procName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.catName is None: + raise TProtocolException(message="Required field catName is unset!") + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.procName is None: + raise TProtocolException(message="Required field procName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ListStoredProcedureRequest: + """ + Attributes: + - catName + - dbName + + """ + + def __init__( + self, + catName=None, + dbName=None, + ): + self.catName = catName + self.dbName = dbName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ListStoredProcedureRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.catName is None: + raise TProtocolException(message="Required field catName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class StoredProcedure: + """ + Attributes: + - name + - dbName + - catName + - ownerName + - source + + """ + + def __init__( + self, + name=None, + dbName=None, + catName=None, + ownerName=None, + source=None, + ): + self.name = name + self.dbName = dbName + self.catName = catName + self.ownerName = ownerName + self.source = source + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + 
and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.ownerName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.source = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("StoredProcedure") + if self.name is not None: + oprot.writeFieldBegin("name", TType.STRING, 1) + oprot.writeString(self.name.encode("utf-8") if sys.version_info[0] == 2 else self.name) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 3) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.ownerName is not None: + oprot.writeFieldBegin("ownerName", TType.STRING, 4) + oprot.writeString(self.ownerName.encode("utf-8") if sys.version_info[0] == 2 else self.ownerName) + oprot.writeFieldEnd() + if self.source is not None: + oprot.writeFieldBegin("source", TType.STRING, 5) + oprot.writeString(self.source.encode("utf-8") if sys.version_info[0] == 2 else self.source) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AddPackageRequest: + """ + Attributes: + - catName + - dbName + - packageName + - ownerName + - header + - body + + """ + + def __init__( + self, + catName=None, + dbName=None, + packageName=None, + ownerName=None, + header=None, + body=None, + ): + self.catName = catName + self.dbName = dbName + self.packageName = packageName + self.ownerName = ownerName + self.header = header + self.body = body + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + 
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.packageName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.ownerName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.header = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.body = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AddPackageRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.packageName is not None: + oprot.writeFieldBegin("packageName", TType.STRING, 3) + oprot.writeString(self.packageName.encode("utf-8") if sys.version_info[0] == 2 else self.packageName) + oprot.writeFieldEnd() + if self.ownerName is not None: + oprot.writeFieldBegin("ownerName", TType.STRING, 4) + oprot.writeString(self.ownerName.encode("utf-8") if sys.version_info[0] == 2 else self.ownerName) + oprot.writeFieldEnd() + if self.header is not None: + oprot.writeFieldBegin("header", TType.STRING, 5) + oprot.writeString(self.header.encode("utf-8") if sys.version_info[0] == 2 else self.header) + oprot.writeFieldEnd() + if self.body is not None: + oprot.writeFieldBegin("body", TType.STRING, 6) + oprot.writeString(self.body.encode("utf-8") if sys.version_info[0] == 2 else self.body) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetPackageRequest: + """ + Attributes: + - catName + - dbName + - packageName + + """ + + def __init__( + self, + catName=None, + dbName=None, + packageName=None, + ): + self.catName 
= catName + self.dbName = dbName + self.packageName = packageName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.packageName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetPackageRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.packageName is not None: + oprot.writeFieldBegin("packageName", TType.STRING, 3) + oprot.writeString(self.packageName.encode("utf-8") if sys.version_info[0] == 2 else self.packageName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.catName is None: + raise TProtocolException(message="Required field catName is unset!") + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.packageName is None: + raise TProtocolException(message="Required field packageName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class DropPackageRequest: + """ + Attributes: + - catName + - dbName + - packageName + + """ + + def __init__( + self, + catName=None, + dbName=None, + packageName=None, + ): + self.catName = catName + self.dbName = dbName + self.packageName = packageName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == 
TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.packageName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("DropPackageRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.packageName is not None: + oprot.writeFieldBegin("packageName", TType.STRING, 3) + oprot.writeString(self.packageName.encode("utf-8") if sys.version_info[0] == 2 else self.packageName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.catName is None: + raise TProtocolException(message="Required field catName is unset!") + if self.dbName is None: + raise TProtocolException(message="Required field dbName is unset!") + if self.packageName is None: + raise TProtocolException(message="Required field packageName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ListPackageRequest: + """ + Attributes: + - catName + - dbName + + """ + + def __init__( + self, + catName=None, + dbName=None, + ): + self.catName = catName + self.dbName = dbName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ListPackageRequest") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + 
oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.catName is None: + raise TProtocolException(message="Required field catName is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class Package: + """ + Attributes: + - catName + - dbName + - packageName + - ownerName + - header + - body + + """ + + def __init__( + self, + catName=None, + dbName=None, + packageName=None, + ownerName=None, + header=None, + body=None, + ): + self.catName = catName + self.dbName = dbName + self.packageName = packageName + self.ownerName = ownerName + self.header = header + self.body = body + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.catName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.packageName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.ownerName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.header = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.body = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("Package") + if self.catName is not None: + oprot.writeFieldBegin("catName", TType.STRING, 1) + oprot.writeString(self.catName.encode("utf-8") if sys.version_info[0] == 2 else self.catName) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.packageName is not None: + oprot.writeFieldBegin("packageName", TType.STRING, 3) + oprot.writeString(self.packageName.encode("utf-8") if sys.version_info[0] == 2 else 
self.packageName) + oprot.writeFieldEnd() + if self.ownerName is not None: + oprot.writeFieldBegin("ownerName", TType.STRING, 4) + oprot.writeString(self.ownerName.encode("utf-8") if sys.version_info[0] == 2 else self.ownerName) + oprot.writeFieldEnd() + if self.header is not None: + oprot.writeFieldBegin("header", TType.STRING, 5) + oprot.writeString(self.header.encode("utf-8") if sys.version_info[0] == 2 else self.header) + oprot.writeFieldEnd() + if self.body is not None: + oprot.writeFieldBegin("body", TType.STRING, 6) + oprot.writeString(self.body.encode("utf-8") if sys.version_info[0] == 2 else self.body) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class GetAllWriteEventInfoRequest: + """ + Attributes: + - txnId + - dbName + - tableName + + """ + + def __init__( + self, + txnId=None, + dbName=None, + tableName=None, + ): + self.txnId = txnId + self.dbName = dbName + self.tableName = tableName + + def read(self, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and self.thrift_spec is not None + ): + iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.txnId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tableName = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("GetAllWriteEventInfoRequest") + if self.txnId is not None: + oprot.writeFieldBegin("txnId", TType.I64, 1) + oprot.writeI64(self.txnId) + oprot.writeFieldEnd() + if self.dbName is not None: + oprot.writeFieldBegin("dbName", TType.STRING, 2) + oprot.writeString(self.dbName.encode("utf-8") if sys.version_info[0] == 2 else self.dbName) + oprot.writeFieldEnd() + if self.tableName is not None: + oprot.writeFieldBegin("tableName", TType.STRING, 3) + oprot.writeString(self.tableName.encode("utf-8") if sys.version_info[0] == 2 else self.tableName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.txnId is None: + raise TProtocolException(message="Required field txnId is unset!") + return + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + 
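+ +# Note on the exception structs that follow (MetaException through +# NoSuchLockException): unlike the mutable request/response structs above, +# these are generated as immutable value objects — __setattr__ and +# __delattr__ raise TypeError, __hash__ is defined so instances are usable +# as dict keys, and read() is a classmethod that decodes the wire fields +# first and only then constructs the frozen instance. The +# sys.version_info[0] == 2 branches are Python 2 compatibility code emitted +# by the Thrift compiler and are dead on Python 3. A minimal round-trip +# sketch, assuming the standard `thrift` runtime package is available: +# from thrift.transport import TTransport +# from thrift.protocol import TBinaryProtocol +# buf = TTransport.TMemoryBuffer() +# MetaException(message="boom").write(TBinaryProtocol.TBinaryProtocol(buf)) +# rbuf = TTransport.TMemoryBuffer(buf.getvalue()) +# err = MetaException.read(TBinaryProtocol.TBinaryProtocol(rbuf))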
+ +class MetaException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("MetaException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class UnknownTableException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("UnknownTableException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + 
oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class UnknownDBException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("UnknownDBException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class AlreadyExistsException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + 
iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("AlreadyExistsException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class InvalidPartitionException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("InvalidPartitionException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class UnknownPartitionException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, 
TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("UnknownPartitionException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class InvalidObjectException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("InvalidObjectException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class NoSuchObjectException(TException): + 
""" + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("NoSuchObjectException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class InvalidOperationException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("InvalidOperationException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + 
oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class ConfigValSecurityException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("ConfigValSecurityException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class InvalidInputException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + 
iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("InvalidInputException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class NoSuchTxnException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("NoSuchTxnException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class TxnAbortedException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and 
cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("TxnAbortedException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class TxnOpenException(TException): + """ + Attributes: + - message + + """ + + def __init__( + self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("TxnOpenException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +class NoSuchLockException(TException): + """ + Attributes: + - message + + """ + + def __init__( + 
self, + message=None, + ): + super().__setattr__("message", message) + + def __setattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __delattr__(self, *args): + raise TypeError("can't modify immutable instance") + + def __hash__(self): + return hash(self.__class__) ^ hash((self.message,)) + + @classmethod + def read(cls, iprot): + if ( + iprot._fast_decode is not None + and isinstance(iprot.trans, TTransport.CReadableTransport) + and cls.thrift_spec is not None + ): + return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec]) + iprot.readStructBegin() + message = None + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + message = ( + iprot.readString().decode("utf-8", errors="replace") if sys.version_info[0] == 2 else iprot.readString() + ) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + return cls( + message=message, + ) + + def write(self, oprot): + if oprot._fast_encode is not None and self.thrift_spec is not None: + oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) + return + oprot.writeStructBegin("NoSuchLockException") + if self.message is not None: + oprot.writeFieldBegin("message", TType.STRING, 1) + oprot.writeString(self.message.encode("utf-8") if sys.version_info[0] == 2 else self.message) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __str__(self): + return repr(self) + + def __repr__(self): + L = ["{}={!r}".format(key, value) for key, value in self.__dict__.items()] + return "{}({})".format(self.__class__.__name__, ", ".join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + +all_structs.append(Version) +Version.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "version", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "comments", + "UTF8", + None, + ), # 2 +) +all_structs.append(FieldSchema) +FieldSchema.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "type", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "comment", + "UTF8", + None, + ), # 3 +) +all_structs.append(EnvironmentContext) +EnvironmentContext.thrift_spec = ( + None, # 0 + ( + 1, + TType.MAP, + "properties", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 1 +) +all_structs.append(SQLPrimaryKey) +SQLPrimaryKey.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "table_db", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "table_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "column_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I32, + "key_seq", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "pk_name", + "UTF8", + None, + ), # 5 + ( + 6, + TType.BOOL, + "enable_cstr", + None, + None, + ), # 6 + ( + 7, + TType.BOOL, + "validate_cstr", + None, + None, + ), # 7 + ( + 8, + TType.BOOL, + "rely_cstr", + None, + None, + ), # 8 + ( + 9, + TType.STRING, + "catName", + "UTF8", + None, + ), # 9 +) +all_structs.append(SQLForeignKey) +SQLForeignKey.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "pktable_db", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "pktable_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "pkcolumn_name", + "UTF8", + None, + ), # 3 + ( + 4, + 
TType.STRING, + "fktable_db", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "fktable_name", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "fkcolumn_name", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I32, + "key_seq", + None, + None, + ), # 7 + ( + 8, + TType.I32, + "update_rule", + None, + None, + ), # 8 + ( + 9, + TType.I32, + "delete_rule", + None, + None, + ), # 9 + ( + 10, + TType.STRING, + "fk_name", + "UTF8", + None, + ), # 10 + ( + 11, + TType.STRING, + "pk_name", + "UTF8", + None, + ), # 11 + ( + 12, + TType.BOOL, + "enable_cstr", + None, + None, + ), # 12 + ( + 13, + TType.BOOL, + "validate_cstr", + None, + None, + ), # 13 + ( + 14, + TType.BOOL, + "rely_cstr", + None, + None, + ), # 14 + ( + 15, + TType.STRING, + "catName", + "UTF8", + None, + ), # 15 +) +all_structs.append(SQLUniqueConstraint) +SQLUniqueConstraint.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "table_db", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "table_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "column_name", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I32, + "key_seq", + None, + None, + ), # 5 + ( + 6, + TType.STRING, + "uk_name", + "UTF8", + None, + ), # 6 + ( + 7, + TType.BOOL, + "enable_cstr", + None, + None, + ), # 7 + ( + 8, + TType.BOOL, + "validate_cstr", + None, + None, + ), # 8 + ( + 9, + TType.BOOL, + "rely_cstr", + None, + None, + ), # 9 +) +all_structs.append(SQLNotNullConstraint) +SQLNotNullConstraint.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "table_db", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "table_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "column_name", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "nn_name", + "UTF8", + None, + ), # 5 + ( + 6, + TType.BOOL, + "enable_cstr", + None, + None, + ), # 6 + ( + 7, + TType.BOOL, + "validate_cstr", + None, + None, + ), # 7 + ( + 8, + TType.BOOL, + "rely_cstr", + None, + None, + ), # 8 +) +all_structs.append(SQLDefaultConstraint) +SQLDefaultConstraint.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "table_db", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "table_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "column_name", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "default_value", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "dc_name", + "UTF8", + None, + ), # 6 + ( + 7, + TType.BOOL, + "enable_cstr", + None, + None, + ), # 7 + ( + 8, + TType.BOOL, + "validate_cstr", + None, + None, + ), # 8 + ( + 9, + TType.BOOL, + "rely_cstr", + None, + None, + ), # 9 +) +all_structs.append(SQLCheckConstraint) +SQLCheckConstraint.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "table_db", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "table_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "column_name", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "check_expression", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "dc_name", + "UTF8", + None, + ), # 6 + ( + 7, + TType.BOOL, + "enable_cstr", + None, + None, + ), # 7 + ( + 8, + TType.BOOL, + "validate_cstr", + None, + None, + ), # 8 + ( + 9, + TType.BOOL, + "rely_cstr", + None, + None, + ), # 9 +) +all_structs.append(SQLAllTableConstraints) 
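+# Editorial note (hedged; these comment lines are not generator output): the struct
+# classes are defined first and their thrift_spec tuples are assigned here in a
+# second pass, which lets a spec reference a struct declared later in the file --
+# CreationMetadata's spec points at SourceTable, whose own spec is only filled in
+# further down. The generator's usual epilogue (presumably fix_spec(all_structs)
+# from thrift.TRecursive, not shown in this hunk) then walks all_structs to resolve
+# the nested [StructClass, None] placeholders.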
+SQLAllTableConstraints.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "primaryKeys", + (TType.STRUCT, [SQLPrimaryKey, None], False), + None, + ), # 1 + ( + 2, + TType.LIST, + "foreignKeys", + (TType.STRUCT, [SQLForeignKey, None], False), + None, + ), # 2 + ( + 3, + TType.LIST, + "uniqueConstraints", + (TType.STRUCT, [SQLUniqueConstraint, None], False), + None, + ), # 3 + ( + 4, + TType.LIST, + "notNullConstraints", + (TType.STRUCT, [SQLNotNullConstraint, None], False), + None, + ), # 4 + ( + 5, + TType.LIST, + "defaultConstraints", + (TType.STRUCT, [SQLDefaultConstraint, None], False), + None, + ), # 5 + ( + 6, + TType.LIST, + "checkConstraints", + (TType.STRUCT, [SQLCheckConstraint, None], False), + None, + ), # 6 +) +all_structs.append(Type) +Type.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "type1", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "type2", + "UTF8", + None, + ), # 3 + ( + 4, + TType.LIST, + "fields", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 4 +) +all_structs.append(HiveObjectRef) +HiveObjectRef.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "objectType", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "objectName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.LIST, + "partValues", + (TType.STRING, "UTF8", False), + None, + ), # 4 + ( + 5, + TType.STRING, + "columnName", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "catName", + "UTF8", + None, + ), # 6 +) +all_structs.append(PrivilegeGrantInfo) +PrivilegeGrantInfo.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "privilege", + "UTF8", + None, + ), # 1 + ( + 2, + TType.I32, + "createTime", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "grantor", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I32, + "grantorType", + None, + None, + ), # 4 + ( + 5, + TType.BOOL, + "grantOption", + None, + None, + ), # 5 +) +all_structs.append(HiveObjectPrivilege) +HiveObjectPrivilege.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "hiveObject", + [HiveObjectRef, None], + None, + ), # 1 + ( + 2, + TType.STRING, + "principalName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I32, + "principalType", + None, + None, + ), # 3 + ( + 4, + TType.STRUCT, + "grantInfo", + [PrivilegeGrantInfo, None], + None, + ), # 4 + ( + 5, + TType.STRING, + "authorizer", + "UTF8", + None, + ), # 5 +) +all_structs.append(PrivilegeBag) +PrivilegeBag.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "privileges", + (TType.STRUCT, [HiveObjectPrivilege, None], False), + None, + ), # 1 +) +all_structs.append(PrincipalPrivilegeSet) +PrincipalPrivilegeSet.thrift_spec = ( + None, # 0 + ( + 1, + TType.MAP, + "userPrivileges", + (TType.STRING, "UTF8", TType.LIST, (TType.STRUCT, [PrivilegeGrantInfo, None], False), False), + None, + ), # 1 + ( + 2, + TType.MAP, + "groupPrivileges", + (TType.STRING, "UTF8", TType.LIST, (TType.STRUCT, [PrivilegeGrantInfo, None], False), False), + None, + ), # 2 + ( + 3, + TType.MAP, + "rolePrivileges", + (TType.STRING, "UTF8", TType.LIST, (TType.STRUCT, [PrivilegeGrantInfo, None], False), False), + None, + ), # 3 +) +all_structs.append(GrantRevokePrivilegeRequest) +GrantRevokePrivilegeRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "requestType", + None, + None, + ), # 1 + ( + 2, + TType.STRUCT, + "privileges", + [PrivilegeBag, None], + None, + ), # 2 + ( + 3, + TType.BOOL, + "revokeGrantOption", + None, + None, + ), # 3 +) 
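+# Hedged usage sketch (editorial; assumes the stock thrift-py runtime is importable,
+# nothing here is part of the generated file): round-tripping one of these structs
+# through the plain binary protocol looks roughly like
+#
+#   from thrift.transport import TTransport
+#   from thrift.protocol import TBinaryProtocol
+#
+#   out = TTransport.TMemoryBuffer()
+#   Version(version="3.1.0", comments="demo").write(TBinaryProtocol.TBinaryProtocol(out))
+#   raw = out.getvalue()                                    # serialized wire bytes
+#
+#   inp = TTransport.TMemoryBuffer(raw)
+#   v = Version.read(TBinaryProtocol.TBinaryProtocol(inp))  # read() is a classmethod here
+#   assert v.version == "3.1.0"
+#
+# read() returns a fresh instance because this vendored variant emits immutable
+# structs (__setattr__ raises TypeError), unlike stock Thrift output where read()
+# mutates self in place.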
+all_structs.append(GrantRevokePrivilegeResponse) +GrantRevokePrivilegeResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.BOOL, + "success", + None, + None, + ), # 1 +) +all_structs.append(TruncateTableRequest) +TruncateTableRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "partNames", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.I64, + "writeId", + None, + -1, + ), # 4 + ( + 5, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRUCT, + "environmentContext", + [EnvironmentContext, None], + None, + ), # 6 +) +all_structs.append(TruncateTableResponse) +TruncateTableResponse.thrift_spec = () +all_structs.append(Role) +Role.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "roleName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.I32, + "createTime", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "ownerName", + "UTF8", + None, + ), # 3 +) +all_structs.append(RolePrincipalGrant) +RolePrincipalGrant.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "roleName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "principalName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I32, + "principalType", + None, + None, + ), # 3 + ( + 4, + TType.BOOL, + "grantOption", + None, + None, + ), # 4 + ( + 5, + TType.I32, + "grantTime", + None, + None, + ), # 5 + ( + 6, + TType.STRING, + "grantorName", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I32, + "grantorPrincipalType", + None, + None, + ), # 7 +) +all_structs.append(GetRoleGrantsForPrincipalRequest) +GetRoleGrantsForPrincipalRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "principal_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.I32, + "principal_type", + None, + None, + ), # 2 +) +all_structs.append(GetRoleGrantsForPrincipalResponse) +GetRoleGrantsForPrincipalResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "principalGrants", + (TType.STRUCT, [RolePrincipalGrant, None], False), + None, + ), # 1 +) +all_structs.append(GetPrincipalsInRoleRequest) +GetPrincipalsInRoleRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "roleName", + "UTF8", + None, + ), # 1 +) +all_structs.append(GetPrincipalsInRoleResponse) +GetPrincipalsInRoleResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "principalGrants", + (TType.STRUCT, [RolePrincipalGrant, None], False), + None, + ), # 1 +) +all_structs.append(GrantRevokeRoleRequest) +GrantRevokeRoleRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "requestType", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "roleName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "principalName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I32, + "principalType", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "grantor", + "UTF8", + None, + ), # 5 + ( + 6, + TType.I32, + "grantorType", + None, + None, + ), # 6 + ( + 7, + TType.BOOL, + "grantOption", + None, + None, + ), # 7 +) +all_structs.append(GrantRevokeRoleResponse) +GrantRevokeRoleResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.BOOL, + "success", + None, + None, + ), # 1 +) +all_structs.append(Catalog) +Catalog.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "description", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "locationUri", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I32, + "createTime", + None, + None, + ), 
# 4 +) +all_structs.append(CreateCatalogRequest) +CreateCatalogRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "catalog", + [Catalog, None], + None, + ), # 1 +) +all_structs.append(AlterCatalogRequest) +AlterCatalogRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRUCT, + "newCat", + [Catalog, None], + None, + ), # 2 +) +all_structs.append(GetCatalogRequest) +GetCatalogRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 +) +all_structs.append(GetCatalogResponse) +GetCatalogResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "catalog", + [Catalog, None], + None, + ), # 1 +) +all_structs.append(GetCatalogsResponse) +GetCatalogsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "names", + (TType.STRING, "UTF8", False), + None, + ), # 1 +) +all_structs.append(DropCatalogRequest) +DropCatalogRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 +) +all_structs.append(Database) +Database.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "description", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "locationUri", + "UTF8", + None, + ), # 3 + ( + 4, + TType.MAP, + "parameters", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 4 + ( + 5, + TType.STRUCT, + "privileges", + [PrincipalPrivilegeSet, None], + None, + ), # 5 + ( + 6, + TType.STRING, + "ownerName", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I32, + "ownerType", + None, + None, + ), # 7 + ( + 8, + TType.STRING, + "catalogName", + "UTF8", + None, + ), # 8 + ( + 9, + TType.I32, + "createTime", + None, + None, + ), # 9 + ( + 10, + TType.STRING, + "managedLocationUri", + "UTF8", + None, + ), # 10 + ( + 11, + TType.I32, + "type", + None, + None, + ), # 11 + ( + 12, + TType.STRING, + "connector_name", + "UTF8", + None, + ), # 12 + ( + 13, + TType.STRING, + "remote_dbname", + "UTF8", + None, + ), # 13 +) +all_structs.append(SerDeInfo) +SerDeInfo.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "serializationLib", + "UTF8", + None, + ), # 2 + ( + 3, + TType.MAP, + "parameters", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.STRING, + "description", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "serializerClass", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "deserializerClass", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I32, + "serdeType", + None, + None, + ), # 7 +) +all_structs.append(Order) +Order.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "col", + "UTF8", + None, + ), # 1 + ( + 2, + TType.I32, + "order", + None, + None, + ), # 2 +) +all_structs.append(SkewedInfo) +SkewedInfo.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "skewedColNames", + (TType.STRING, "UTF8", False), + None, + ), # 1 + ( + 2, + TType.LIST, + "skewedColValues", + (TType.LIST, (TType.STRING, "UTF8", False), False), + None, + ), # 2 + ( + 3, + TType.MAP, + "skewedColValueLocationMaps", + (TType.LIST, (TType.STRING, "UTF8", False), TType.STRING, "UTF8", False), + None, + ), # 3 +) +all_structs.append(StorageDescriptor) +StorageDescriptor.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "cols", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 1 + ( + 2, + TType.STRING, + "location", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + 
"inputFormat", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "outputFormat", + "UTF8", + None, + ), # 4 + ( + 5, + TType.BOOL, + "compressed", + None, + None, + ), # 5 + ( + 6, + TType.I32, + "numBuckets", + None, + None, + ), # 6 + ( + 7, + TType.STRUCT, + "serdeInfo", + [SerDeInfo, None], + None, + ), # 7 + ( + 8, + TType.LIST, + "bucketCols", + (TType.STRING, "UTF8", False), + None, + ), # 8 + ( + 9, + TType.LIST, + "sortCols", + (TType.STRUCT, [Order, None], False), + None, + ), # 9 + ( + 10, + TType.MAP, + "parameters", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 10 + ( + 11, + TType.STRUCT, + "skewedInfo", + [SkewedInfo, None], + None, + ), # 11 + ( + 12, + TType.BOOL, + "storedAsSubDirectories", + None, + None, + ), # 12 +) +all_structs.append(CreationMetadata) +CreationMetadata.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.SET, + "tablesUsed", + (TType.STRING, "UTF8", False), + None, + ), # 4 + ( + 5, + TType.STRING, + "validTxnList", + "UTF8", + None, + ), # 5 + ( + 6, + TType.I64, + "materializationTime", + None, + None, + ), # 6 + ( + 7, + TType.LIST, + "sourceTables", + (TType.STRUCT, [SourceTable, None], False), + None, + ), # 7 +) +all_structs.append(BooleanColumnStatsData) +BooleanColumnStatsData.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "numTrues", + None, + None, + ), # 1 + ( + 2, + TType.I64, + "numFalses", + None, + None, + ), # 2 + ( + 3, + TType.I64, + "numNulls", + None, + None, + ), # 3 + ( + 4, + TType.STRING, + "bitVectors", + "BINARY", + None, + ), # 4 +) +all_structs.append(DoubleColumnStatsData) +DoubleColumnStatsData.thrift_spec = ( + None, # 0 + ( + 1, + TType.DOUBLE, + "lowValue", + None, + None, + ), # 1 + ( + 2, + TType.DOUBLE, + "highValue", + None, + None, + ), # 2 + ( + 3, + TType.I64, + "numNulls", + None, + None, + ), # 3 + ( + 4, + TType.I64, + "numDVs", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "bitVectors", + "BINARY", + None, + ), # 5 +) +all_structs.append(LongColumnStatsData) +LongColumnStatsData.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "lowValue", + None, + None, + ), # 1 + ( + 2, + TType.I64, + "highValue", + None, + None, + ), # 2 + ( + 3, + TType.I64, + "numNulls", + None, + None, + ), # 3 + ( + 4, + TType.I64, + "numDVs", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "bitVectors", + "BINARY", + None, + ), # 5 +) +all_structs.append(StringColumnStatsData) +StringColumnStatsData.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "maxColLen", + None, + None, + ), # 1 + ( + 2, + TType.DOUBLE, + "avgColLen", + None, + None, + ), # 2 + ( + 3, + TType.I64, + "numNulls", + None, + None, + ), # 3 + ( + 4, + TType.I64, + "numDVs", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "bitVectors", + "BINARY", + None, + ), # 5 +) +all_structs.append(BinaryColumnStatsData) +BinaryColumnStatsData.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "maxColLen", + None, + None, + ), # 1 + ( + 2, + TType.DOUBLE, + "avgColLen", + None, + None, + ), # 2 + ( + 3, + TType.I64, + "numNulls", + None, + None, + ), # 3 + ( + 4, + TType.STRING, + "bitVectors", + "BINARY", + None, + ), # 4 +) +all_structs.append(Decimal) +Decimal.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "unscaled", + "BINARY", + None, + ), # 1 + None, # 2 + ( + 3, + TType.I16, + "scale", + None, + None, + ), # 3 +) 
+all_structs.append(DecimalColumnStatsData) +DecimalColumnStatsData.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "lowValue", + [Decimal, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "highValue", + [Decimal, None], + None, + ), # 2 + ( + 3, + TType.I64, + "numNulls", + None, + None, + ), # 3 + ( + 4, + TType.I64, + "numDVs", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "bitVectors", + "BINARY", + None, + ), # 5 +) +all_structs.append(Date) +Date.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "daysSinceEpoch", + None, + None, + ), # 1 +) +all_structs.append(DateColumnStatsData) +DateColumnStatsData.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "lowValue", + [Date, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "highValue", + [Date, None], + None, + ), # 2 + ( + 3, + TType.I64, + "numNulls", + None, + None, + ), # 3 + ( + 4, + TType.I64, + "numDVs", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "bitVectors", + "BINARY", + None, + ), # 5 +) +all_structs.append(Timestamp) +Timestamp.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "secondsSinceEpoch", + None, + None, + ), # 1 +) +all_structs.append(TimestampColumnStatsData) +TimestampColumnStatsData.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "lowValue", + [Timestamp, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "highValue", + [Timestamp, None], + None, + ), # 2 + ( + 3, + TType.I64, + "numNulls", + None, + None, + ), # 3 + ( + 4, + TType.I64, + "numDVs", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "bitVectors", + "BINARY", + None, + ), # 5 +) +all_structs.append(ColumnStatisticsData) +ColumnStatisticsData.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "booleanStats", + [BooleanColumnStatsData, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "longStats", + [LongColumnStatsData, None], + None, + ), # 2 + ( + 3, + TType.STRUCT, + "doubleStats", + [DoubleColumnStatsData, None], + None, + ), # 3 + ( + 4, + TType.STRUCT, + "stringStats", + [StringColumnStatsData, None], + None, + ), # 4 + ( + 5, + TType.STRUCT, + "binaryStats", + [BinaryColumnStatsData, None], + None, + ), # 5 + ( + 6, + TType.STRUCT, + "decimalStats", + [DecimalColumnStatsData, None], + None, + ), # 6 + ( + 7, + TType.STRUCT, + "dateStats", + [DateColumnStatsData, None], + None, + ), # 7 + ( + 8, + TType.STRUCT, + "timestampStats", + [TimestampColumnStatsData, None], + None, + ), # 8 +) +all_structs.append(ColumnStatisticsObj) +ColumnStatisticsObj.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "colName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "colType", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRUCT, + "statsData", + [ColumnStatisticsData, None], + None, + ), # 3 +) +all_structs.append(ColumnStatisticsDesc) +ColumnStatisticsDesc.thrift_spec = ( + None, # 0 + ( + 1, + TType.BOOL, + "isTblLevel", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "partName", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I64, + "lastAnalyzed", + None, + None, + ), # 5 + ( + 6, + TType.STRING, + "catName", + "UTF8", + None, + ), # 6 +) +all_structs.append(ColumnStatistics) +ColumnStatistics.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "statsDesc", + [ColumnStatisticsDesc, None], + None, + ), # 1 + ( + 2, + TType.LIST, + "statsObj", + (TType.STRUCT, [ColumnStatisticsObj, None], False), + None, + ), # 2 + ( + 3, + TType.BOOL, + "isStatsCompliant", + None, 
+ None, + ), # 3 + ( + 4, + TType.STRING, + "engine", + "UTF8", + None, + ), # 4 +) +all_structs.append(FileMetadata) +FileMetadata.thrift_spec = ( + None, # 0 + ( + 1, + TType.BYTE, + "type", + None, + 1, + ), # 1 + ( + 2, + TType.BYTE, + "version", + None, + 1, + ), # 2 + ( + 3, + TType.LIST, + "data", + (TType.STRING, "BINARY", False), + None, + ), # 3 +) +all_structs.append(ObjectDictionary) +ObjectDictionary.thrift_spec = ( + None, # 0 + ( + 1, + TType.MAP, + "values", + (TType.STRING, "UTF8", TType.LIST, (TType.STRING, "BINARY", False), False), + None, + ), # 1 +) +all_structs.append(Table) +Table.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "owner", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I32, + "createTime", + None, + None, + ), # 4 + ( + 5, + TType.I32, + "lastAccessTime", + None, + None, + ), # 5 + ( + 6, + TType.I32, + "retention", + None, + None, + ), # 6 + ( + 7, + TType.STRUCT, + "sd", + [StorageDescriptor, None], + None, + ), # 7 + ( + 8, + TType.LIST, + "partitionKeys", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 8 + ( + 9, + TType.MAP, + "parameters", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 9 + ( + 10, + TType.STRING, + "viewOriginalText", + "UTF8", + None, + ), # 10 + ( + 11, + TType.STRING, + "viewExpandedText", + "UTF8", + None, + ), # 11 + ( + 12, + TType.STRING, + "tableType", + "UTF8", + None, + ), # 12 + ( + 13, + TType.STRUCT, + "privileges", + [PrincipalPrivilegeSet, None], + None, + ), # 13 + ( + 14, + TType.BOOL, + "temporary", + None, + False, + ), # 14 + ( + 15, + TType.BOOL, + "rewriteEnabled", + None, + None, + ), # 15 + ( + 16, + TType.STRUCT, + "creationMetadata", + [CreationMetadata, None], + None, + ), # 16 + ( + 17, + TType.STRING, + "catName", + "UTF8", + None, + ), # 17 + ( + 18, + TType.I32, + "ownerType", + None, + 1, + ), # 18 + ( + 19, + TType.I64, + "writeId", + None, + -1, + ), # 19 + ( + 20, + TType.BOOL, + "isStatsCompliant", + None, + None, + ), # 20 + ( + 21, + TType.STRUCT, + "colStats", + [ColumnStatistics, None], + None, + ), # 21 + ( + 22, + TType.BYTE, + "accessType", + None, + None, + ), # 22 + ( + 23, + TType.LIST, + "requiredReadCapabilities", + (TType.STRING, "UTF8", False), + None, + ), # 23 + ( + 24, + TType.LIST, + "requiredWriteCapabilities", + (TType.STRING, "UTF8", False), + None, + ), # 24 + ( + 25, + TType.I64, + "id", + None, + None, + ), # 25 + ( + 26, + TType.STRUCT, + "fileMetadata", + [FileMetadata, None], + None, + ), # 26 + ( + 27, + TType.STRUCT, + "dictionary", + [ObjectDictionary, None], + None, + ), # 27 + ( + 28, + TType.I64, + "txnId", + None, + None, + ), # 28 +) +all_structs.append(SourceTable) +SourceTable.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "table", + [Table, None], + None, + ), # 1 + ( + 2, + TType.I64, + "insertedCount", + None, + None, + ), # 2 + ( + 3, + TType.I64, + "updatedCount", + None, + None, + ), # 3 + ( + 4, + TType.I64, + "deletedCount", + None, + None, + ), # 4 +) +all_structs.append(Partition) +Partition.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "values", + (TType.STRING, "UTF8", False), + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I32, + "createTime", + None, + None, + ), # 4 + ( + 5, + TType.I32, + "lastAccessTime", + None, + None, + ), # 5 + ( + 6, + 
TType.STRUCT, + "sd", + [StorageDescriptor, None], + None, + ), # 6 + ( + 7, + TType.MAP, + "parameters", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 7 + ( + 8, + TType.STRUCT, + "privileges", + [PrincipalPrivilegeSet, None], + None, + ), # 8 + ( + 9, + TType.STRING, + "catName", + "UTF8", + None, + ), # 9 + ( + 10, + TType.I64, + "writeId", + None, + -1, + ), # 10 + ( + 11, + TType.BOOL, + "isStatsCompliant", + None, + None, + ), # 11 + ( + 12, + TType.STRUCT, + "colStats", + [ColumnStatistics, None], + None, + ), # 12 + ( + 13, + TType.STRUCT, + "fileMetadata", + [FileMetadata, None], + None, + ), # 13 +) +all_structs.append(PartitionWithoutSD) +PartitionWithoutSD.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "values", + (TType.STRING, "UTF8", False), + None, + ), # 1 + ( + 2, + TType.I32, + "createTime", + None, + None, + ), # 2 + ( + 3, + TType.I32, + "lastAccessTime", + None, + None, + ), # 3 + ( + 4, + TType.STRING, + "relativePath", + "UTF8", + None, + ), # 4 + ( + 5, + TType.MAP, + "parameters", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 5 + ( + 6, + TType.STRUCT, + "privileges", + [PrincipalPrivilegeSet, None], + None, + ), # 6 +) +all_structs.append(PartitionSpecWithSharedSD) +PartitionSpecWithSharedSD.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "partitions", + (TType.STRUCT, [PartitionWithoutSD, None], False), + None, + ), # 1 + ( + 2, + TType.STRUCT, + "sd", + [StorageDescriptor, None], + None, + ), # 2 +) +all_structs.append(PartitionListComposingSpec) +PartitionListComposingSpec.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "partitions", + (TType.STRUCT, [Partition, None], False), + None, + ), # 1 +) +all_structs.append(PartitionSpec) +PartitionSpec.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "rootPath", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRUCT, + "sharedSDPartitionSpec", + [PartitionSpecWithSharedSD, None], + None, + ), # 4 + ( + 5, + TType.STRUCT, + "partitionList", + [PartitionListComposingSpec, None], + None, + ), # 5 + ( + 6, + TType.STRING, + "catName", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I64, + "writeId", + None, + -1, + ), # 7 + ( + 8, + TType.BOOL, + "isStatsCompliant", + None, + None, + ), # 8 +) +all_structs.append(AggrStats) +AggrStats.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "colStats", + (TType.STRUCT, [ColumnStatisticsObj, None], False), + None, + ), # 1 + ( + 2, + TType.I64, + "partsFound", + None, + None, + ), # 2 + ( + 3, + TType.BOOL, + "isStatsCompliant", + None, + None, + ), # 3 +) +all_structs.append(SetPartitionsStatsRequest) +SetPartitionsStatsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "colStats", + (TType.STRUCT, [ColumnStatistics, None], False), + None, + ), # 1 + ( + 2, + TType.BOOL, + "needMerge", + None, + None, + ), # 2 + ( + 3, + TType.I64, + "writeId", + None, + -1, + ), # 3 + ( + 4, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "engine", + "UTF8", + None, + ), # 5 +) +all_structs.append(SetPartitionsStatsResponse) +SetPartitionsStatsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.BOOL, + "result", + None, + None, + ), # 1 +) +all_structs.append(Schema) +Schema.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "fieldSchemas", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 1 + ( + 2, + TType.MAP, + "properties", + 
(TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 2 +) +all_structs.append(PrimaryKeysRequest) +PrimaryKeysRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "catName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I64, + "tableId", + None, + -1, + ), # 5 +) +all_structs.append(PrimaryKeysResponse) +PrimaryKeysResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "primaryKeys", + (TType.STRUCT, [SQLPrimaryKey, None], False), + None, + ), # 1 +) +all_structs.append(ForeignKeysRequest) +ForeignKeysRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "parent_db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "parent_tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "foreign_db_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "foreign_tbl_name", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "catName", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I64, + "tableId", + None, + -1, + ), # 7 +) +all_structs.append(ForeignKeysResponse) +ForeignKeysResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "foreignKeys", + (TType.STRUCT, [SQLForeignKey, None], False), + None, + ), # 1 +) +all_structs.append(UniqueConstraintsRequest) +UniqueConstraintsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I64, + "tableId", + None, + -1, + ), # 5 +) +all_structs.append(UniqueConstraintsResponse) +UniqueConstraintsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "uniqueConstraints", + (TType.STRUCT, [SQLUniqueConstraint, None], False), + None, + ), # 1 +) +all_structs.append(NotNullConstraintsRequest) +NotNullConstraintsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I64, + "tableId", + None, + -1, + ), # 5 +) +all_structs.append(NotNullConstraintsResponse) +NotNullConstraintsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "notNullConstraints", + (TType.STRUCT, [SQLNotNullConstraint, None], False), + None, + ), # 1 +) +all_structs.append(DefaultConstraintsRequest) +DefaultConstraintsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I64, + "tableId", + None, + -1, + ), # 5 +) +all_structs.append(DefaultConstraintsResponse) +DefaultConstraintsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "defaultConstraints", + (TType.STRUCT, [SQLDefaultConstraint, None], False), + None, + ), # 1 +) +all_structs.append(CheckConstraintsRequest) +CheckConstraintsRequest.thrift_spec = ( + None, # 0 + ( + 1, + 
TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I64, + "tableId", + None, + -1, + ), # 5 +) +all_structs.append(CheckConstraintsResponse) +CheckConstraintsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "checkConstraints", + (TType.STRUCT, [SQLCheckConstraint, None], False), + None, + ), # 1 +) +all_structs.append(AllTableConstraintsRequest) +AllTableConstraintsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "catName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I64, + "tableId", + None, + -1, + ), # 5 +) +all_structs.append(AllTableConstraintsResponse) +AllTableConstraintsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "allTableConstraints", + [SQLAllTableConstraints, None], + None, + ), # 1 +) +all_structs.append(DropConstraintRequest) +DropConstraintRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tablename", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "constraintname", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "catName", + "UTF8", + None, + ), # 4 +) +all_structs.append(AddPrimaryKeyRequest) +AddPrimaryKeyRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "primaryKeyCols", + (TType.STRUCT, [SQLPrimaryKey, None], False), + None, + ), # 1 +) +all_structs.append(AddForeignKeyRequest) +AddForeignKeyRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "foreignKeyCols", + (TType.STRUCT, [SQLForeignKey, None], False), + None, + ), # 1 +) +all_structs.append(AddUniqueConstraintRequest) +AddUniqueConstraintRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "uniqueConstraintCols", + (TType.STRUCT, [SQLUniqueConstraint, None], False), + None, + ), # 1 +) +all_structs.append(AddNotNullConstraintRequest) +AddNotNullConstraintRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "notNullConstraintCols", + (TType.STRUCT, [SQLNotNullConstraint, None], False), + None, + ), # 1 +) +all_structs.append(AddDefaultConstraintRequest) +AddDefaultConstraintRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "defaultConstraintCols", + (TType.STRUCT, [SQLDefaultConstraint, None], False), + None, + ), # 1 +) +all_structs.append(AddCheckConstraintRequest) +AddCheckConstraintRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "checkConstraintCols", + (TType.STRUCT, [SQLCheckConstraint, None], False), + None, + ), # 1 +) +all_structs.append(PartitionsByExprResult) +PartitionsByExprResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "partitions", + (TType.STRUCT, [Partition, None], False), + None, + ), # 1 + ( + 2, + TType.BOOL, + "hasUnknownPartitions", + None, + None, + ), # 2 +) +all_structs.append(PartitionsSpecByExprResult) +PartitionsSpecByExprResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "partitionsSpec", + (TType.STRUCT, [PartitionSpec, None], False), + None, + ), # 1 + ( + 2, + TType.BOOL, + "hasUnknownPartitions", + None, + None, + ), # 2 +) +all_structs.append(PartitionsByExprRequest) +PartitionsByExprRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + 
"dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "expr", + "BINARY", + None, + ), # 3 + ( + 4, + TType.STRING, + "defaultPartitionName", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I16, + "maxParts", + None, + -1, + ), # 5 + ( + 6, + TType.STRING, + "catName", + "UTF8", + None, + ), # 6 + ( + 7, + TType.STRING, + "order", + "UTF8", + None, + ), # 7 + ( + 8, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 8 + ( + 9, + TType.I64, + "id", + None, + -1, + ), # 9 +) +all_structs.append(TableStatsResult) +TableStatsResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "tableStats", + (TType.STRUCT, [ColumnStatisticsObj, None], False), + None, + ), # 1 + ( + 2, + TType.BOOL, + "isStatsCompliant", + None, + None, + ), # 2 +) +all_structs.append(PartitionsStatsResult) +PartitionsStatsResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.MAP, + "partStats", + (TType.STRING, "UTF8", TType.LIST, (TType.STRUCT, [ColumnStatisticsObj, None], False), False), + None, + ), # 1 + ( + 2, + TType.BOOL, + "isStatsCompliant", + None, + None, + ), # 2 +) +all_structs.append(TableStatsRequest) +TableStatsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "colNames", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.STRING, + "catName", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "engine", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I64, + "id", + None, + -1, + ), # 7 +) +all_structs.append(PartitionsStatsRequest) +PartitionsStatsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "colNames", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.LIST, + "partNames", + (TType.STRING, "UTF8", False), + None, + ), # 4 + ( + 5, + TType.STRING, + "catName", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 6 + ( + 7, + TType.STRING, + "engine", + "UTF8", + None, + ), # 7 +) +all_structs.append(AddPartitionsResult) +AddPartitionsResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "partitions", + (TType.STRUCT, [Partition, None], False), + None, + ), # 1 + ( + 2, + TType.BOOL, + "isStatsCompliant", + None, + None, + ), # 2 +) +all_structs.append(AddPartitionsRequest) +AddPartitionsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "parts", + (TType.STRUCT, [Partition, None], False), + None, + ), # 3 + ( + 4, + TType.BOOL, + "ifNotExists", + None, + None, + ), # 4 + ( + 5, + TType.BOOL, + "needResult", + None, + True, + ), # 5 + ( + 6, + TType.STRING, + "catName", + "UTF8", + None, + ), # 6 + ( + 7, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 7 +) +all_structs.append(DropPartitionsResult) +DropPartitionsResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "partitions", + (TType.STRUCT, [Partition, None], False), + None, + ), # 1 +) +all_structs.append(DropPartitionsExpr) +DropPartitionsExpr.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "expr", + "BINARY", + None, + ), # 1 + ( + 2, + TType.I32, + "partArchiveLevel", 
+ None, + None, + ), # 2 +) +all_structs.append(RequestPartsSpec) +RequestPartsSpec.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "names", + (TType.STRING, "UTF8", False), + None, + ), # 1 + ( + 2, + TType.LIST, + "exprs", + (TType.STRUCT, [DropPartitionsExpr, None], False), + None, + ), # 2 +) +all_structs.append(DropPartitionsRequest) +DropPartitionsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRUCT, + "parts", + [RequestPartsSpec, None], + None, + ), # 3 + ( + 4, + TType.BOOL, + "deleteData", + None, + None, + ), # 4 + ( + 5, + TType.BOOL, + "ifExists", + None, + True, + ), # 5 + ( + 6, + TType.BOOL, + "ignoreProtection", + None, + None, + ), # 6 + ( + 7, + TType.STRUCT, + "environmentContext", + [EnvironmentContext, None], + None, + ), # 7 + ( + 8, + TType.BOOL, + "needResult", + None, + True, + ), # 8 + ( + 9, + TType.STRING, + "catName", + "UTF8", + None, + ), # 9 +) +all_structs.append(PartitionValuesRequest) +PartitionValuesRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "partitionKeys", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 3 + ( + 4, + TType.BOOL, + "applyDistinct", + None, + True, + ), # 4 + ( + 5, + TType.STRING, + "filter", + "UTF8", + None, + ), # 5 + ( + 6, + TType.LIST, + "partitionOrder", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 6 + ( + 7, + TType.BOOL, + "ascending", + None, + True, + ), # 7 + ( + 8, + TType.I64, + "maxParts", + None, + -1, + ), # 8 + ( + 9, + TType.STRING, + "catName", + "UTF8", + None, + ), # 9 + ( + 10, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 10 +) +all_structs.append(PartitionValuesRow) +PartitionValuesRow.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "row", + (TType.STRING, "UTF8", False), + None, + ), # 1 +) +all_structs.append(PartitionValuesResponse) +PartitionValuesResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "partitionValues", + (TType.STRUCT, [PartitionValuesRow, None], False), + None, + ), # 1 +) +all_structs.append(GetPartitionsByNamesRequest) +GetPartitionsByNamesRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "db_name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tbl_name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "names", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.BOOL, + "get_col_stats", + None, + None, + ), # 4 + ( + 5, + TType.LIST, + "processorCapabilities", + (TType.STRING, "UTF8", False), + None, + ), # 5 + ( + 6, + TType.STRING, + "processorIdentifier", + "UTF8", + None, + ), # 6 + ( + 7, + TType.STRING, + "engine", + "UTF8", + None, + ), # 7 + ( + 8, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 8 + ( + 9, + TType.BOOL, + "getFileMetadata", + None, + None, + ), # 9 + ( + 10, + TType.I64, + "id", + None, + -1, + ), # 10 +) +all_structs.append(GetPartitionsByNamesResult) +GetPartitionsByNamesResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "partitions", + (TType.STRUCT, [Partition, None], False), + None, + ), # 1 + ( + 2, + TType.STRUCT, + "dictionary", + [ObjectDictionary, None], + None, + ), # 2 +) +all_structs.append(DataConnector) +DataConnector.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "type", + 
"UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "url", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "description", + "UTF8", + None, + ), # 4 + ( + 5, + TType.MAP, + "parameters", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 5 + ( + 6, + TType.STRING, + "ownerName", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I32, + "ownerType", + None, + None, + ), # 7 + ( + 8, + TType.I32, + "createTime", + None, + None, + ), # 8 +) +all_structs.append(ResourceUri) +ResourceUri.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "resourceType", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "uri", + "UTF8", + None, + ), # 2 +) +all_structs.append(Function) +Function.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "functionName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "className", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "ownerName", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I32, + "ownerType", + None, + None, + ), # 5 + ( + 6, + TType.I32, + "createTime", + None, + None, + ), # 6 + ( + 7, + TType.I32, + "functionType", + None, + None, + ), # 7 + ( + 8, + TType.LIST, + "resourceUris", + (TType.STRUCT, [ResourceUri, None], False), + None, + ), # 8 + ( + 9, + TType.STRING, + "catName", + "UTF8", + None, + ), # 9 +) +all_structs.append(TxnInfo) +TxnInfo.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "id", + None, + None, + ), # 1 + ( + 2, + TType.I32, + "state", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "user", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "hostname", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "agentInfo", + "UTF8", + "Unknown", + ), # 5 + ( + 6, + TType.I32, + "heartbeatCount", + None, + 0, + ), # 6 + ( + 7, + TType.STRING, + "metaInfo", + "UTF8", + None, + ), # 7 + ( + 8, + TType.I64, + "startedTime", + None, + None, + ), # 8 + ( + 9, + TType.I64, + "lastHeartbeatTime", + None, + None, + ), # 9 +) +all_structs.append(GetOpenTxnsInfoResponse) +GetOpenTxnsInfoResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "txn_high_water_mark", + None, + None, + ), # 1 + ( + 2, + TType.LIST, + "open_txns", + (TType.STRUCT, [TxnInfo, None], False), + None, + ), # 2 +) +all_structs.append(GetOpenTxnsResponse) +GetOpenTxnsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "txn_high_water_mark", + None, + None, + ), # 1 + ( + 2, + TType.LIST, + "open_txns", + (TType.I64, None, False), + None, + ), # 2 + ( + 3, + TType.I64, + "min_open_txn", + None, + None, + ), # 3 + ( + 4, + TType.STRING, + "abortedBits", + "BINARY", + None, + ), # 4 +) +all_structs.append(OpenTxnRequest) +OpenTxnRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "num_txns", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "user", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "hostname", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "agentInfo", + "UTF8", + "Unknown", + ), # 4 + ( + 5, + TType.STRING, + "replPolicy", + "UTF8", + None, + ), # 5 + ( + 6, + TType.LIST, + "replSrcTxnIds", + (TType.I64, None, False), + None, + ), # 6 + ( + 7, + TType.I32, + "txn_type", + None, + 0, + ), # 7 +) +all_structs.append(OpenTxnsResponse) +OpenTxnsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "txn_ids", + (TType.I64, None, False), + None, + ), # 1 +) +all_structs.append(AbortTxnRequest) +AbortTxnRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "txnid", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + 
"replPolicy", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I32, + "txn_type", + None, + None, + ), # 3 +) +all_structs.append(AbortTxnsRequest) +AbortTxnsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "txn_ids", + (TType.I64, None, False), + None, + ), # 1 +) +all_structs.append(CommitTxnKeyValue) +CommitTxnKeyValue.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "tableId", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "key", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "value", + "UTF8", + None, + ), # 3 +) +all_structs.append(WriteEventInfo) +WriteEventInfo.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "writeId", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "database", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "table", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "files", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "partition", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "tableObj", + "UTF8", + None, + ), # 6 + ( + 7, + TType.STRING, + "partitionObj", + "UTF8", + None, + ), # 7 +) +all_structs.append(ReplLastIdInfo) +ReplLastIdInfo.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "database", + "UTF8", + None, + ), # 1 + ( + 2, + TType.I64, + "lastReplId", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "table", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "catalog", + "UTF8", + None, + ), # 4 + ( + 5, + TType.LIST, + "partitionList", + (TType.STRING, "UTF8", False), + None, + ), # 5 +) +all_structs.append(UpdateTransactionalStatsRequest) +UpdateTransactionalStatsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "tableId", + None, + None, + ), # 1 + ( + 2, + TType.I64, + "insertCount", + None, + None, + ), # 2 + ( + 3, + TType.I64, + "updatedCount", + None, + None, + ), # 3 + ( + 4, + TType.I64, + "deletedCount", + None, + None, + ), # 4 +) +all_structs.append(CommitTxnRequest) +CommitTxnRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "txnid", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "replPolicy", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "writeEventInfos", + (TType.STRUCT, [WriteEventInfo, None], False), + None, + ), # 3 + ( + 4, + TType.STRUCT, + "replLastIdInfo", + [ReplLastIdInfo, None], + None, + ), # 4 + ( + 5, + TType.STRUCT, + "keyValue", + [CommitTxnKeyValue, None], + None, + ), # 5 + ( + 6, + TType.BOOL, + "exclWriteEnabled", + None, + True, + ), # 6 + ( + 7, + TType.I32, + "txn_type", + None, + None, + ), # 7 +) +all_structs.append(ReplTblWriteIdStateRequest) +ReplTblWriteIdStateRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "validWriteIdlist", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "user", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "hostName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 5 + ( + 6, + TType.LIST, + "partNames", + (TType.STRING, "UTF8", False), + None, + ), # 6 +) +all_structs.append(GetValidWriteIdsRequest) +GetValidWriteIdsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "fullTableNames", + (TType.STRING, "UTF8", False), + None, + ), # 1 + ( + 2, + TType.STRING, + "validTxnList", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I64, + "writeId", + None, + None, + ), # 3 +) +all_structs.append(TableValidWriteIds) +TableValidWriteIds.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "fullTableName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.I64, + 
"writeIdHighWaterMark", + None, + None, + ), # 2 + ( + 3, + TType.LIST, + "invalidWriteIds", + (TType.I64, None, False), + None, + ), # 3 + ( + 4, + TType.I64, + "minOpenWriteId", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "abortedBits", + "BINARY", + None, + ), # 5 +) +all_structs.append(GetValidWriteIdsResponse) +GetValidWriteIdsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "tblValidWriteIds", + (TType.STRUCT, [TableValidWriteIds, None], False), + None, + ), # 1 +) +all_structs.append(TxnToWriteId) +TxnToWriteId.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "txnId", + None, + None, + ), # 1 + ( + 2, + TType.I64, + "writeId", + None, + None, + ), # 2 +) +all_structs.append(AllocateTableWriteIdsRequest) +AllocateTableWriteIdsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "txnIds", + (TType.I64, None, False), + None, + ), # 3 + ( + 4, + TType.STRING, + "replPolicy", + "UTF8", + None, + ), # 4 + ( + 5, + TType.LIST, + "srcTxnToWriteIdList", + (TType.STRUCT, [TxnToWriteId, None], False), + None, + ), # 5 +) +all_structs.append(AllocateTableWriteIdsResponse) +AllocateTableWriteIdsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "txnToWriteIds", + (TType.STRUCT, [TxnToWriteId, None], False), + None, + ), # 1 +) +all_structs.append(MaxAllocatedTableWriteIdRequest) +MaxAllocatedTableWriteIdRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 2 +) +all_structs.append(MaxAllocatedTableWriteIdResponse) +MaxAllocatedTableWriteIdResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "maxWriteId", + None, + None, + ), # 1 +) +all_structs.append(SeedTableWriteIdsRequest) +SeedTableWriteIdsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I64, + "seedWriteId", + None, + None, + ), # 3 +) +all_structs.append(SeedTxnIdRequest) +SeedTxnIdRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "seedTxnId", + None, + None, + ), # 1 +) +all_structs.append(LockComponent) +LockComponent.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "type", + None, + None, + ), # 1 + ( + 2, + TType.I32, + "level", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "tablename", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "partitionname", + "UTF8", + None, + ), # 5 + ( + 6, + TType.I32, + "operationType", + None, + 5, + ), # 6 + ( + 7, + TType.BOOL, + "isTransactional", + None, + False, + ), # 7 + ( + 8, + TType.BOOL, + "isDynamicPartitionWrite", + None, + False, + ), # 8 +) +all_structs.append(LockRequest) +LockRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "component", + (TType.STRUCT, [LockComponent, None], False), + None, + ), # 1 + ( + 2, + TType.I64, + "txnid", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "user", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "hostname", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "agentInfo", + "UTF8", + "Unknown", + ), # 5 + ( + 6, + TType.BOOL, + "zeroWaitReadEnabled", + None, + False, + ), # 6 + ( + 7, + TType.BOOL, + "exclusiveCTAS", + None, + False, + ), # 7 +) +all_structs.append(LockResponse) +LockResponse.thrift_spec = ( + None, # 
0 + ( + 1, + TType.I64, + "lockid", + None, + None, + ), # 1 + ( + 2, + TType.I32, + "state", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "errorMessage", + "UTF8", + None, + ), # 3 +) +all_structs.append(CheckLockRequest) +CheckLockRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "lockid", + None, + None, + ), # 1 + ( + 2, + TType.I64, + "txnid", + None, + None, + ), # 2 + ( + 3, + TType.I64, + "elapsed_ms", + None, + None, + ), # 3 +) +all_structs.append(UnlockRequest) +UnlockRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "lockid", + None, + None, + ), # 1 +) +all_structs.append(ShowLocksRequest) +ShowLocksRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tablename", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "partname", + "UTF8", + None, + ), # 3 + ( + 4, + TType.BOOL, + "isExtended", + None, + False, + ), # 4 + ( + 5, + TType.I64, + "txnid", + None, + None, + ), # 5 +) +all_structs.append(ShowLocksResponseElement) +ShowLocksResponseElement.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "lockid", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tablename", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "partname", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I32, + "state", + None, + None, + ), # 5 + ( + 6, + TType.I32, + "type", + None, + None, + ), # 6 + ( + 7, + TType.I64, + "txnid", + None, + None, + ), # 7 + ( + 8, + TType.I64, + "lastheartbeat", + None, + None, + ), # 8 + ( + 9, + TType.I64, + "acquiredat", + None, + None, + ), # 9 + ( + 10, + TType.STRING, + "user", + "UTF8", + None, + ), # 10 + ( + 11, + TType.STRING, + "hostname", + "UTF8", + None, + ), # 11 + ( + 12, + TType.I32, + "heartbeatCount", + None, + 0, + ), # 12 + ( + 13, + TType.STRING, + "agentInfo", + "UTF8", + None, + ), # 13 + ( + 14, + TType.I64, + "blockedByExtId", + None, + None, + ), # 14 + ( + 15, + TType.I64, + "blockedByIntId", + None, + None, + ), # 15 + ( + 16, + TType.I64, + "lockIdInternal", + None, + None, + ), # 16 +) +all_structs.append(ShowLocksResponse) +ShowLocksResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "locks", + (TType.STRUCT, [ShowLocksResponseElement, None], False), + None, + ), # 1 +) +all_structs.append(HeartbeatRequest) +HeartbeatRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "lockid", + None, + None, + ), # 1 + ( + 2, + TType.I64, + "txnid", + None, + None, + ), # 2 +) +all_structs.append(HeartbeatTxnRangeRequest) +HeartbeatTxnRangeRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "min", + None, + None, + ), # 1 + ( + 2, + TType.I64, + "max", + None, + None, + ), # 2 +) +all_structs.append(HeartbeatTxnRangeResponse) +HeartbeatTxnRangeResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.SET, + "aborted", + (TType.I64, None, False), + None, + ), # 1 + ( + 2, + TType.SET, + "nosuch", + (TType.I64, None, False), + None, + ), # 2 +) +all_structs.append(CompactionRequest) +CompactionRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tablename", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "partitionname", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I32, + "type", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "runas", + "UTF8", + None, + ), # 5 + ( + 6, + TType.MAP, + "properties", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 6 + ( + 7, + 
TType.STRING, + "initiatorId", + "UTF8", + None, + ), # 7 + ( + 8, + TType.STRING, + "initiatorVersion", + "UTF8", + None, + ), # 8 +) +all_structs.append(CompactionInfoStruct) +CompactionInfoStruct.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "id", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tablename", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "partitionname", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I32, + "type", + None, + None, + ), # 5 + ( + 6, + TType.STRING, + "runas", + "UTF8", + None, + ), # 6 + ( + 7, + TType.STRING, + "properties", + "UTF8", + None, + ), # 7 + ( + 8, + TType.BOOL, + "toomanyaborts", + None, + None, + ), # 8 + ( + 9, + TType.STRING, + "state", + "UTF8", + None, + ), # 9 + ( + 10, + TType.STRING, + "workerId", + "UTF8", + None, + ), # 10 + ( + 11, + TType.I64, + "start", + None, + None, + ), # 11 + ( + 12, + TType.I64, + "highestWriteId", + None, + None, + ), # 12 + ( + 13, + TType.STRING, + "errorMessage", + "UTF8", + None, + ), # 13 + ( + 14, + TType.BOOL, + "hasoldabort", + None, + None, + ), # 14 + ( + 15, + TType.I64, + "enqueueTime", + None, + None, + ), # 15 + ( + 16, + TType.I64, + "retryRetention", + None, + None, + ), # 16 +) +all_structs.append(OptionalCompactionInfoStruct) +OptionalCompactionInfoStruct.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "ci", + [CompactionInfoStruct, None], + None, + ), # 1 +) +all_structs.append(CompactionMetricsDataStruct) +CompactionMetricsDataStruct.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tblname", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "partitionname", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I32, + "type", + None, + None, + ), # 4 + ( + 5, + TType.I32, + "metricvalue", + None, + None, + ), # 5 + ( + 6, + TType.I32, + "version", + None, + None, + ), # 6 + ( + 7, + TType.I32, + "threshold", + None, + None, + ), # 7 +) +all_structs.append(CompactionMetricsDataResponse) +CompactionMetricsDataResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "data", + [CompactionMetricsDataStruct, None], + None, + ), # 1 +) +all_structs.append(CompactionMetricsDataRequest) +CompactionMetricsDataRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "partitionName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I32, + "type", + None, + None, + ), # 4 +) +all_structs.append(CompactionResponse) +CompactionResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "id", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "state", + "UTF8", + None, + ), # 2 + ( + 3, + TType.BOOL, + "accepted", + None, + None, + ), # 3 + ( + 4, + TType.STRING, + "errormessage", + "UTF8", + None, + ), # 4 +) +all_structs.append(ShowCompactRequest) +ShowCompactRequest.thrift_spec = () +all_structs.append(ShowCompactResponseElement) +ShowCompactResponseElement.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tablename", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "partitionname", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I32, + "type", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "state", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "workerid", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I64, + "start", + None, + None, + 
), # 7 + ( + 8, + TType.STRING, + "runAs", + "UTF8", + None, + ), # 8 + ( + 9, + TType.I64, + "hightestTxnId", + None, + None, + ), # 9 + ( + 10, + TType.STRING, + "metaInfo", + "UTF8", + None, + ), # 10 + ( + 11, + TType.I64, + "endTime", + None, + None, + ), # 11 + ( + 12, + TType.STRING, + "hadoopJobId", + "UTF8", + "None", + ), # 12 + ( + 13, + TType.I64, + "id", + None, + None, + ), # 13 + ( + 14, + TType.STRING, + "errorMessage", + "UTF8", + None, + ), # 14 + ( + 15, + TType.I64, + "enqueueTime", + None, + None, + ), # 15 + ( + 16, + TType.STRING, + "workerVersion", + "UTF8", + None, + ), # 16 + ( + 17, + TType.STRING, + "initiatorId", + "UTF8", + None, + ), # 17 + ( + 18, + TType.STRING, + "initiatorVersion", + "UTF8", + None, + ), # 18 + ( + 19, + TType.I64, + "cleanerStart", + None, + None, + ), # 19 +) +all_structs.append(ShowCompactResponse) +ShowCompactResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "compacts", + (TType.STRUCT, [ShowCompactResponseElement, None], False), + None, + ), # 1 +) +all_structs.append(GetLatestCommittedCompactionInfoRequest) +GetLatestCommittedCompactionInfoRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tablename", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "partitionnames", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.I64, + "lastCompactionId", + None, + None, + ), # 4 +) +all_structs.append(GetLatestCommittedCompactionInfoResponse) +GetLatestCommittedCompactionInfoResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "compactions", + (TType.STRUCT, [CompactionInfoStruct, None], False), + None, + ), # 1 +) +all_structs.append(FindNextCompactRequest) +FindNextCompactRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "workerId", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "workerVersion", + "UTF8", + None, + ), # 2 +) +all_structs.append(AddDynamicPartitions) +AddDynamicPartitions.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "txnid", + None, + None, + ), # 1 + ( + 2, + TType.I64, + "writeid", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "tablename", + "UTF8", + None, + ), # 4 + ( + 5, + TType.LIST, + "partitionnames", + (TType.STRING, "UTF8", False), + None, + ), # 5 + ( + 6, + TType.I32, + "operationType", + None, + 5, + ), # 6 +) +all_structs.append(BasicTxnInfo) +BasicTxnInfo.thrift_spec = ( + None, # 0 + ( + 1, + TType.BOOL, + "isnull", + None, + None, + ), # 1 + ( + 2, + TType.I64, + "time", + None, + None, + ), # 2 + ( + 3, + TType.I64, + "txnid", + None, + None, + ), # 3 + ( + 4, + TType.STRING, + "dbname", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "tablename", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "partitionname", + "UTF8", + None, + ), # 6 +) +all_structs.append(NotificationEventRequest) +NotificationEventRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "lastEvent", + None, + None, + ), # 1 + ( + 2, + TType.I32, + "maxEvents", + None, + None, + ), # 2 + ( + 3, + TType.LIST, + "eventTypeSkipList", + (TType.STRING, "UTF8", False), + None, + ), # 3 +) +all_structs.append(NotificationEvent) +NotificationEvent.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "eventId", + None, + None, + ), # 1 + ( + 2, + TType.I32, + "eventTime", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "eventType", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "dbName", + "UTF8", + None, + ), 
# 4 + ( + 5, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "message", + "UTF8", + None, + ), # 6 + ( + 7, + TType.STRING, + "messageFormat", + "UTF8", + None, + ), # 7 + ( + 8, + TType.STRING, + "catName", + "UTF8", + None, + ), # 8 +) +all_structs.append(NotificationEventResponse) +NotificationEventResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "events", + (TType.STRUCT, [NotificationEvent, None], False), + None, + ), # 1 +) +all_structs.append(CurrentNotificationEventId) +CurrentNotificationEventId.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "eventId", + None, + None, + ), # 1 +) +all_structs.append(NotificationEventsCountRequest) +NotificationEventsCountRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "fromEventId", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "catName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I64, + "toEventId", + None, + None, + ), # 4 + ( + 5, + TType.I64, + "limit", + None, + None, + ), # 5 +) +all_structs.append(NotificationEventsCountResponse) +NotificationEventsCountResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "eventsCount", + None, + None, + ), # 1 +) +all_structs.append(InsertEventRequestData) +InsertEventRequestData.thrift_spec = ( + None, # 0 + ( + 1, + TType.BOOL, + "replace", + None, + None, + ), # 1 + ( + 2, + TType.LIST, + "filesAdded", + (TType.STRING, "UTF8", False), + None, + ), # 2 + ( + 3, + TType.LIST, + "filesAddedChecksum", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.LIST, + "subDirectoryList", + (TType.STRING, "UTF8", False), + None, + ), # 4 + ( + 5, + TType.LIST, + "partitionVal", + (TType.STRING, "UTF8", False), + None, + ), # 5 +) +all_structs.append(FireEventRequestData) +FireEventRequestData.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "insertData", + [InsertEventRequestData, None], + None, + ), # 1 + ( + 2, + TType.LIST, + "insertDatas", + (TType.STRUCT, [InsertEventRequestData, None], False), + None, + ), # 2 +) +all_structs.append(FireEventRequest) +FireEventRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.BOOL, + "successful", + None, + None, + ), # 1 + ( + 2, + TType.STRUCT, + "data", + [FireEventRequestData, None], + None, + ), # 2 + ( + 3, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 4 + ( + 5, + TType.LIST, + "partitionVals", + (TType.STRING, "UTF8", False), + None, + ), # 5 + ( + 6, + TType.STRING, + "catName", + "UTF8", + None, + ), # 6 +) +all_structs.append(FireEventResponse) +FireEventResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "eventIds", + (TType.I64, None, False), + None, + ), # 1 +) +all_structs.append(WriteNotificationLogRequest) +WriteNotificationLogRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "txnId", + None, + None, + ), # 1 + ( + 2, + TType.I64, + "writeId", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "db", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "table", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRUCT, + "fileInfo", + [InsertEventRequestData, None], + None, + ), # 5 + ( + 6, + TType.LIST, + "partitionVals", + (TType.STRING, "UTF8", False), + None, + ), # 6 +) +all_structs.append(WriteNotificationLogResponse) +WriteNotificationLogResponse.thrift_spec = () +all_structs.append(WriteNotificationLogBatchRequest) +WriteNotificationLogBatchRequest.thrift_spec = ( + None, # 0 + ( + 1, + 
TType.STRING, + "catalog", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "db", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "table", + "UTF8", + None, + ), # 3 + ( + 4, + TType.LIST, + "requestList", + (TType.STRUCT, [WriteNotificationLogRequest, None], False), + None, + ), # 4 +) +all_structs.append(WriteNotificationLogBatchResponse) +WriteNotificationLogBatchResponse.thrift_spec = () +all_structs.append(MetadataPpdResult) +MetadataPpdResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "metadata", + "BINARY", + None, + ), # 1 + ( + 2, + TType.STRING, + "includeBitset", + "BINARY", + None, + ), # 2 +) +all_structs.append(GetFileMetadataByExprResult) +GetFileMetadataByExprResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.MAP, + "metadata", + (TType.I64, None, TType.STRUCT, [MetadataPpdResult, None], False), + None, + ), # 1 + ( + 2, + TType.BOOL, + "isSupported", + None, + None, + ), # 2 +) +all_structs.append(GetFileMetadataByExprRequest) +GetFileMetadataByExprRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "fileIds", + (TType.I64, None, False), + None, + ), # 1 + ( + 2, + TType.STRING, + "expr", + "BINARY", + None, + ), # 2 + ( + 3, + TType.BOOL, + "doGetFooters", + None, + None, + ), # 3 + ( + 4, + TType.I32, + "type", + None, + None, + ), # 4 +) +all_structs.append(GetFileMetadataResult) +GetFileMetadataResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.MAP, + "metadata", + (TType.I64, None, TType.STRING, "BINARY", False), + None, + ), # 1 + ( + 2, + TType.BOOL, + "isSupported", + None, + None, + ), # 2 +) +all_structs.append(GetFileMetadataRequest) +GetFileMetadataRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "fileIds", + (TType.I64, None, False), + None, + ), # 1 +) +all_structs.append(PutFileMetadataResult) +PutFileMetadataResult.thrift_spec = () +all_structs.append(PutFileMetadataRequest) +PutFileMetadataRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "fileIds", + (TType.I64, None, False), + None, + ), # 1 + ( + 2, + TType.LIST, + "metadata", + (TType.STRING, "BINARY", False), + None, + ), # 2 + ( + 3, + TType.I32, + "type", + None, + None, + ), # 3 +) +all_structs.append(ClearFileMetadataResult) +ClearFileMetadataResult.thrift_spec = () +all_structs.append(ClearFileMetadataRequest) +ClearFileMetadataRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "fileIds", + (TType.I64, None, False), + None, + ), # 1 +) +all_structs.append(CacheFileMetadataResult) +CacheFileMetadataResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.BOOL, + "isSupported", + None, + None, + ), # 1 +) +all_structs.append(CacheFileMetadataRequest) +CacheFileMetadataRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "partName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.BOOL, + "isAllParts", + None, + None, + ), # 4 +) +all_structs.append(GetAllFunctionsResponse) +GetAllFunctionsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "functions", + (TType.STRUCT, [Function, None], False), + None, + ), # 1 +) +all_structs.append(ClientCapabilities) +ClientCapabilities.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "values", + (TType.I32, None, False), + None, + ), # 1 +) +all_structs.append(GetProjectionsSpec) +GetProjectionsSpec.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "fieldList", + (TType.STRING, "UTF8", False), + None, + ), # 1 + ( + 2, + TType.STRING, + 
"includeParamKeyPattern", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "excludeParamKeyPattern", + "UTF8", + None, + ), # 3 +) +all_structs.append(GetTableRequest) +GetTableRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRUCT, + "capabilities", + [ClientCapabilities, None], + None, + ), # 3 + ( + 4, + TType.STRING, + "catName", + "UTF8", + None, + ), # 4 + None, # 5 + ( + 6, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 6 + ( + 7, + TType.BOOL, + "getColumnStats", + None, + None, + ), # 7 + ( + 8, + TType.LIST, + "processorCapabilities", + (TType.STRING, "UTF8", False), + None, + ), # 8 + ( + 9, + TType.STRING, + "processorIdentifier", + "UTF8", + None, + ), # 9 + ( + 10, + TType.STRING, + "engine", + "UTF8", + None, + ), # 10 + ( + 11, + TType.I64, + "id", + None, + -1, + ), # 11 +) +all_structs.append(GetTableResult) +GetTableResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "table", + [Table, None], + None, + ), # 1 + ( + 2, + TType.BOOL, + "isStatsCompliant", + None, + None, + ), # 2 +) +all_structs.append(GetTablesRequest) +GetTablesRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.LIST, + "tblNames", + (TType.STRING, "UTF8", False), + None, + ), # 2 + ( + 3, + TType.STRUCT, + "capabilities", + [ClientCapabilities, None], + None, + ), # 3 + ( + 4, + TType.STRING, + "catName", + "UTF8", + None, + ), # 4 + ( + 5, + TType.LIST, + "processorCapabilities", + (TType.STRING, "UTF8", False), + None, + ), # 5 + ( + 6, + TType.STRING, + "processorIdentifier", + "UTF8", + None, + ), # 6 + ( + 7, + TType.STRUCT, + "projectionSpec", + [GetProjectionsSpec, None], + None, + ), # 7 + ( + 8, + TType.STRING, + "tablesPattern", + "UTF8", + None, + ), # 8 +) +all_structs.append(GetTablesResult) +GetTablesResult.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "tables", + (TType.STRUCT, [Table, None], False), + None, + ), # 1 +) +all_structs.append(GetTablesExtRequest) +GetTablesExtRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catalog", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "database", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tableNamePattern", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I32, + "requestedFields", + None, + None, + ), # 4 + ( + 5, + TType.I32, + "limit", + None, + None, + ), # 5 + ( + 6, + TType.LIST, + "processorCapabilities", + (TType.STRING, "UTF8", False), + None, + ), # 6 + ( + 7, + TType.STRING, + "processorIdentifier", + "UTF8", + None, + ), # 7 +) +all_structs.append(ExtendedTableInfo) +ExtendedTableInfo.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.I32, + "accessType", + None, + None, + ), # 2 + ( + 3, + TType.LIST, + "requiredReadCapabilities", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.LIST, + "requiredWriteCapabilities", + (TType.STRING, "UTF8", False), + None, + ), # 4 +) +all_structs.append(GetDatabaseRequest) +GetDatabaseRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "catalogName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.LIST, + "processorCapabilities", + (TType.STRING, "UTF8", False), + None, + ), # 3 + ( + 4, + TType.STRING, + "processorIdentifier", + "UTF8", + None, + ), # 4 +) +all_structs.append(DropDatabaseRequest) 
+DropDatabaseRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "catalogName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.BOOL, + "ignoreUnknownDb", + None, + None, + ), # 3 + ( + 4, + TType.BOOL, + "deleteData", + None, + None, + ), # 4 + ( + 5, + TType.BOOL, + "cascade", + None, + None, + ), # 5 + ( + 6, + TType.BOOL, + "softDelete", + None, + False, + ), # 6 + ( + 7, + TType.I64, + "txnId", + None, + 0, + ), # 7 + ( + 8, + TType.BOOL, + "deleteManagedDir", + None, + True, + ), # 8 +) +all_structs.append(CmRecycleRequest) +CmRecycleRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dataPath", + "UTF8", + None, + ), # 1 + ( + 2, + TType.BOOL, + "purge", + None, + None, + ), # 2 +) +all_structs.append(CmRecycleResponse) +CmRecycleResponse.thrift_spec = () +all_structs.append(TableMeta) +TableMeta.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tableType", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "comments", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "catName", + "UTF8", + None, + ), # 5 +) +all_structs.append(Materialization) +Materialization.thrift_spec = ( + None, # 0 + ( + 1, + TType.BOOL, + "sourceTablesUpdateDeleteModified", + None, + None, + ), # 1 + ( + 2, + TType.BOOL, + "sourceTablesCompacted", + None, + None, + ), # 2 +) +all_structs.append(WMResourcePlan) +WMResourcePlan.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.I32, + "status", + None, + None, + ), # 2 + ( + 3, + TType.I32, + "queryParallelism", + None, + None, + ), # 3 + ( + 4, + TType.STRING, + "defaultPoolPath", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "ns", + "UTF8", + None, + ), # 5 +) +all_structs.append(WMNullableResourcePlan) +WMNullableResourcePlan.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.I32, + "status", + None, + None, + ), # 2 + None, # 3 + ( + 4, + TType.I32, + "queryParallelism", + None, + None, + ), # 4 + ( + 5, + TType.BOOL, + "isSetQueryParallelism", + None, + None, + ), # 5 + ( + 6, + TType.STRING, + "defaultPoolPath", + "UTF8", + None, + ), # 6 + ( + 7, + TType.BOOL, + "isSetDefaultPoolPath", + None, + None, + ), # 7 + ( + 8, + TType.STRING, + "ns", + "UTF8", + None, + ), # 8 +) +all_structs.append(WMPool) +WMPool.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "resourcePlanName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "poolPath", + "UTF8", + None, + ), # 2 + ( + 3, + TType.DOUBLE, + "allocFraction", + None, + None, + ), # 3 + ( + 4, + TType.I32, + "queryParallelism", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "schedulingPolicy", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "ns", + "UTF8", + None, + ), # 6 +) +all_structs.append(WMNullablePool) +WMNullablePool.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "resourcePlanName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "poolPath", + "UTF8", + None, + ), # 2 + ( + 3, + TType.DOUBLE, + "allocFraction", + None, + None, + ), # 3 + ( + 4, + TType.I32, + "queryParallelism", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "schedulingPolicy", + "UTF8", + None, + ), # 5 + ( + 6, + TType.BOOL, + "isSetSchedulingPolicy", + None, + None, + ), # 6 + ( + 7, + TType.STRING, + "ns", + "UTF8", + None, + ), # 7 +) 
+all_structs.append(WMTrigger) +WMTrigger.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "resourcePlanName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "triggerName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "triggerExpression", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "actionExpression", + "UTF8", + None, + ), # 4 + ( + 5, + TType.BOOL, + "isInUnmanaged", + None, + None, + ), # 5 + ( + 6, + TType.STRING, + "ns", + "UTF8", + None, + ), # 6 +) +all_structs.append(WMMapping) +WMMapping.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "resourcePlanName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "entityType", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "entityName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "poolPath", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I32, + "ordering", + None, + None, + ), # 5 + ( + 6, + TType.STRING, + "ns", + "UTF8", + None, + ), # 6 +) +all_structs.append(WMPoolTrigger) +WMPoolTrigger.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "pool", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "trigger", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "ns", + "UTF8", + None, + ), # 3 +) +all_structs.append(WMFullResourcePlan) +WMFullResourcePlan.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "plan", + [WMResourcePlan, None], + None, + ), # 1 + ( + 2, + TType.LIST, + "pools", + (TType.STRUCT, [WMPool, None], False), + None, + ), # 2 + ( + 3, + TType.LIST, + "mappings", + (TType.STRUCT, [WMMapping, None], False), + None, + ), # 3 + ( + 4, + TType.LIST, + "triggers", + (TType.STRUCT, [WMTrigger, None], False), + None, + ), # 4 + ( + 5, + TType.LIST, + "poolTriggers", + (TType.STRUCT, [WMPoolTrigger, None], False), + None, + ), # 5 +) +all_structs.append(WMCreateResourcePlanRequest) +WMCreateResourcePlanRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "resourcePlan", + [WMResourcePlan, None], + None, + ), # 1 + ( + 2, + TType.STRING, + "copyFrom", + "UTF8", + None, + ), # 2 +) +all_structs.append(WMCreateResourcePlanResponse) +WMCreateResourcePlanResponse.thrift_spec = () +all_structs.append(WMGetActiveResourcePlanRequest) +WMGetActiveResourcePlanRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "ns", + "UTF8", + None, + ), # 1 +) +all_structs.append(WMGetActiveResourcePlanResponse) +WMGetActiveResourcePlanResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "resourcePlan", + [WMFullResourcePlan, None], + None, + ), # 1 +) +all_structs.append(WMGetResourcePlanRequest) +WMGetResourcePlanRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "resourcePlanName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "ns", + "UTF8", + None, + ), # 2 +) +all_structs.append(WMGetResourcePlanResponse) +WMGetResourcePlanResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "resourcePlan", + [WMFullResourcePlan, None], + None, + ), # 1 +) +all_structs.append(WMGetAllResourcePlanRequest) +WMGetAllResourcePlanRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "ns", + "UTF8", + None, + ), # 1 +) +all_structs.append(WMGetAllResourcePlanResponse) +WMGetAllResourcePlanResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "resourcePlans", + (TType.STRUCT, [WMResourcePlan, None], False), + None, + ), # 1 +) +all_structs.append(WMAlterResourcePlanRequest) +WMAlterResourcePlanRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "resourcePlanName", + "UTF8", + None, + ), # 1 + ( + 2, + 
TType.STRUCT, + "resourcePlan", + [WMNullableResourcePlan, None], + None, + ), # 2 + ( + 3, + TType.BOOL, + "isEnableAndActivate", + None, + None, + ), # 3 + ( + 4, + TType.BOOL, + "isForceDeactivate", + None, + None, + ), # 4 + ( + 5, + TType.BOOL, + "isReplace", + None, + None, + ), # 5 + ( + 6, + TType.STRING, + "ns", + "UTF8", + None, + ), # 6 +) +all_structs.append(WMAlterResourcePlanResponse) +WMAlterResourcePlanResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "fullResourcePlan", + [WMFullResourcePlan, None], + None, + ), # 1 +) +all_structs.append(WMValidateResourcePlanRequest) +WMValidateResourcePlanRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "resourcePlanName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "ns", + "UTF8", + None, + ), # 2 +) +all_structs.append(WMValidateResourcePlanResponse) +WMValidateResourcePlanResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "errors", + (TType.STRING, "UTF8", False), + None, + ), # 1 + ( + 2, + TType.LIST, + "warnings", + (TType.STRING, "UTF8", False), + None, + ), # 2 +) +all_structs.append(WMDropResourcePlanRequest) +WMDropResourcePlanRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "resourcePlanName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "ns", + "UTF8", + None, + ), # 2 +) +all_structs.append(WMDropResourcePlanResponse) +WMDropResourcePlanResponse.thrift_spec = () +all_structs.append(WMCreateTriggerRequest) +WMCreateTriggerRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "trigger", + [WMTrigger, None], + None, + ), # 1 +) +all_structs.append(WMCreateTriggerResponse) +WMCreateTriggerResponse.thrift_spec = () +all_structs.append(WMAlterTriggerRequest) +WMAlterTriggerRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "trigger", + [WMTrigger, None], + None, + ), # 1 +) +all_structs.append(WMAlterTriggerResponse) +WMAlterTriggerResponse.thrift_spec = () +all_structs.append(WMDropTriggerRequest) +WMDropTriggerRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "resourcePlanName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "triggerName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "ns", + "UTF8", + None, + ), # 3 +) +all_structs.append(WMDropTriggerResponse) +WMDropTriggerResponse.thrift_spec = () +all_structs.append(WMGetTriggersForResourePlanRequest) +WMGetTriggersForResourePlanRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "resourcePlanName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "ns", + "UTF8", + None, + ), # 2 +) +all_structs.append(WMGetTriggersForResourePlanResponse) +WMGetTriggersForResourePlanResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "triggers", + (TType.STRUCT, [WMTrigger, None], False), + None, + ), # 1 +) +all_structs.append(WMCreatePoolRequest) +WMCreatePoolRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "pool", + [WMPool, None], + None, + ), # 1 +) +all_structs.append(WMCreatePoolResponse) +WMCreatePoolResponse.thrift_spec = () +all_structs.append(WMAlterPoolRequest) +WMAlterPoolRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "pool", + [WMNullablePool, None], + None, + ), # 1 + ( + 2, + TType.STRING, + "poolPath", + "UTF8", + None, + ), # 2 +) +all_structs.append(WMAlterPoolResponse) +WMAlterPoolResponse.thrift_spec = () +all_structs.append(WMDropPoolRequest) +WMDropPoolRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "resourcePlanName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + 
"poolPath", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "ns", + "UTF8", + None, + ), # 3 +) +all_structs.append(WMDropPoolResponse) +WMDropPoolResponse.thrift_spec = () +all_structs.append(WMCreateOrUpdateMappingRequest) +WMCreateOrUpdateMappingRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "mapping", + [WMMapping, None], + None, + ), # 1 + ( + 2, + TType.BOOL, + "update", + None, + None, + ), # 2 +) +all_structs.append(WMCreateOrUpdateMappingResponse) +WMCreateOrUpdateMappingResponse.thrift_spec = () +all_structs.append(WMDropMappingRequest) +WMDropMappingRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "mapping", + [WMMapping, None], + None, + ), # 1 +) +all_structs.append(WMDropMappingResponse) +WMDropMappingResponse.thrift_spec = () +all_structs.append(WMCreateOrDropTriggerToPoolMappingRequest) +WMCreateOrDropTriggerToPoolMappingRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "resourcePlanName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "triggerName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "poolPath", + "UTF8", + None, + ), # 3 + ( + 4, + TType.BOOL, + "drop", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "ns", + "UTF8", + None, + ), # 5 +) +all_structs.append(WMCreateOrDropTriggerToPoolMappingResponse) +WMCreateOrDropTriggerToPoolMappingResponse.thrift_spec = () +all_structs.append(ISchema) +ISchema.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "schemaType", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "name", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "catName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 4 + ( + 5, + TType.I32, + "compatibility", + None, + None, + ), # 5 + ( + 6, + TType.I32, + "validationLevel", + None, + None, + ), # 6 + ( + 7, + TType.BOOL, + "canEvolve", + None, + None, + ), # 7 + ( + 8, + TType.STRING, + "schemaGroup", + "UTF8", + None, + ), # 8 + ( + 9, + TType.STRING, + "description", + "UTF8", + None, + ), # 9 +) +all_structs.append(ISchemaName) +ISchemaName.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "schemaName", + "UTF8", + None, + ), # 3 +) +all_structs.append(AlterISchemaRequest) +AlterISchemaRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "name", + [ISchemaName, None], + None, + ), # 1 + None, # 2 + ( + 3, + TType.STRUCT, + "newSchema", + [ISchema, None], + None, + ), # 3 +) +all_structs.append(SchemaVersion) +SchemaVersion.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "schema", + [ISchemaName, None], + None, + ), # 1 + ( + 2, + TType.I32, + "version", + None, + None, + ), # 2 + ( + 3, + TType.I64, + "createdAt", + None, + None, + ), # 3 + ( + 4, + TType.LIST, + "cols", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 4 + ( + 5, + TType.I32, + "state", + None, + None, + ), # 5 + ( + 6, + TType.STRING, + "description", + "UTF8", + None, + ), # 6 + ( + 7, + TType.STRING, + "schemaText", + "UTF8", + None, + ), # 7 + ( + 8, + TType.STRING, + "fingerprint", + "UTF8", + None, + ), # 8 + ( + 9, + TType.STRING, + "name", + "UTF8", + None, + ), # 9 + ( + 10, + TType.STRUCT, + "serDe", + [SerDeInfo, None], + None, + ), # 10 +) +all_structs.append(SchemaVersionDescriptor) +SchemaVersionDescriptor.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "schema", + [ISchemaName, None], + None, + ), # 1 + ( + 2, + TType.I32, + "version", + 
None, + None, + ), # 2 +) +all_structs.append(FindSchemasByColsRqst) +FindSchemasByColsRqst.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "colName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "colNamespace", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "type", + "UTF8", + None, + ), # 3 +) +all_structs.append(FindSchemasByColsResp) +FindSchemasByColsResp.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "schemaVersions", + (TType.STRUCT, [SchemaVersionDescriptor, None], False), + None, + ), # 1 +) +all_structs.append(MapSchemaVersionToSerdeRequest) +MapSchemaVersionToSerdeRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "schemaVersion", + [SchemaVersionDescriptor, None], + None, + ), # 1 + ( + 2, + TType.STRING, + "serdeName", + "UTF8", + None, + ), # 2 +) +all_structs.append(SetSchemaVersionStateRequest) +SetSchemaVersionStateRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "schemaVersion", + [SchemaVersionDescriptor, None], + None, + ), # 1 + ( + 2, + TType.I32, + "state", + None, + None, + ), # 2 +) +all_structs.append(GetSerdeRequest) +GetSerdeRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "serdeName", + "UTF8", + None, + ), # 1 +) +all_structs.append(RuntimeStat) +RuntimeStat.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "createTime", + None, + None, + ), # 1 + ( + 2, + TType.I32, + "weight", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "payload", + "BINARY", + None, + ), # 3 +) +all_structs.append(GetRuntimeStatsRequest) +GetRuntimeStatsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "maxWeight", + None, + None, + ), # 1 + ( + 2, + TType.I32, + "maxCreateTime", + None, + None, + ), # 2 +) +all_structs.append(CreateTableRequest) +CreateTableRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "table", + [Table, None], + None, + ), # 1 + ( + 2, + TType.STRUCT, + "envContext", + [EnvironmentContext, None], + None, + ), # 2 + ( + 3, + TType.LIST, + "primaryKeys", + (TType.STRUCT, [SQLPrimaryKey, None], False), + None, + ), # 3 + ( + 4, + TType.LIST, + "foreignKeys", + (TType.STRUCT, [SQLForeignKey, None], False), + None, + ), # 4 + ( + 5, + TType.LIST, + "uniqueConstraints", + (TType.STRUCT, [SQLUniqueConstraint, None], False), + None, + ), # 5 + ( + 6, + TType.LIST, + "notNullConstraints", + (TType.STRUCT, [SQLNotNullConstraint, None], False), + None, + ), # 6 + ( + 7, + TType.LIST, + "defaultConstraints", + (TType.STRUCT, [SQLDefaultConstraint, None], False), + None, + ), # 7 + ( + 8, + TType.LIST, + "checkConstraints", + (TType.STRUCT, [SQLCheckConstraint, None], False), + None, + ), # 8 + ( + 9, + TType.LIST, + "processorCapabilities", + (TType.STRING, "UTF8", False), + None, + ), # 9 + ( + 10, + TType.STRING, + "processorIdentifier", + "UTF8", + None, + ), # 10 +) +all_structs.append(CreateDatabaseRequest) +CreateDatabaseRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "databaseName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "description", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "locationUri", + "UTF8", + None, + ), # 3 + ( + 4, + TType.MAP, + "parameters", + (TType.STRING, "UTF8", TType.STRING, "UTF8", False), + None, + ), # 4 + ( + 5, + TType.STRUCT, + "privileges", + [PrincipalPrivilegeSet, None], + None, + ), # 5 + ( + 6, + TType.STRING, + "ownerName", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I32, + "ownerType", + None, + None, + ), # 7 + ( + 8, + TType.STRING, + "catalogName", + "UTF8", + None, + ), # 8 + ( + 9, + TType.I32, 
+ "createTime", + None, + None, + ), # 9 + ( + 10, + TType.STRING, + "managedLocationUri", + "UTF8", + None, + ), # 10 + ( + 11, + TType.STRING, + "type", + "UTF8", + None, + ), # 11 + ( + 12, + TType.STRING, + "dataConnectorName", + "UTF8", + None, + ), # 12 +) +all_structs.append(CreateDataConnectorRequest) +CreateDataConnectorRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "connector", + [DataConnector, None], + None, + ), # 1 +) +all_structs.append(GetDataConnectorRequest) +GetDataConnectorRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "connectorName", + "UTF8", + None, + ), # 1 +) +all_structs.append(ScheduledQueryPollRequest) +ScheduledQueryPollRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "clusterNamespace", + "UTF8", + None, + ), # 1 +) +all_structs.append(ScheduledQueryKey) +ScheduledQueryKey.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "scheduleName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "clusterNamespace", + "UTF8", + None, + ), # 2 +) +all_structs.append(ScheduledQueryPollResponse) +ScheduledQueryPollResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "scheduleKey", + [ScheduledQueryKey, None], + None, + ), # 1 + ( + 2, + TType.I64, + "executionId", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "query", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "user", + "UTF8", + None, + ), # 4 +) +all_structs.append(ScheduledQuery) +ScheduledQuery.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "scheduleKey", + [ScheduledQueryKey, None], + None, + ), # 1 + ( + 2, + TType.BOOL, + "enabled", + None, + None, + ), # 2 + None, # 3 + ( + 4, + TType.STRING, + "schedule", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "user", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "query", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I32, + "nextExecution", + None, + None, + ), # 7 +) +all_structs.append(ScheduledQueryMaintenanceRequest) +ScheduledQueryMaintenanceRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I32, + "type", + None, + None, + ), # 1 + ( + 2, + TType.STRUCT, + "scheduledQuery", + [ScheduledQuery, None], + None, + ), # 2 +) +all_structs.append(ScheduledQueryProgressInfo) +ScheduledQueryProgressInfo.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "scheduledExecutionId", + None, + None, + ), # 1 + ( + 2, + TType.I32, + "state", + None, + None, + ), # 2 + ( + 3, + TType.STRING, + "executorQueryId", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "errorMessage", + "UTF8", + None, + ), # 4 +) +all_structs.append(AlterPartitionsRequest) +AlterPartitionsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.LIST, + "partitions", + (TType.STRUCT, [Partition, None], False), + None, + ), # 4 + ( + 5, + TType.STRUCT, + "environmentContext", + [EnvironmentContext, None], + None, + ), # 5 + ( + 6, + TType.I64, + "writeId", + None, + -1, + ), # 6 + ( + 7, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 7 +) +all_structs.append(AlterPartitionsResponse) +AlterPartitionsResponse.thrift_spec = () +all_structs.append(RenamePartitionRequest) +RenamePartitionRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tableName", + "UTF8", + 
None, + ), # 3 + ( + 4, + TType.LIST, + "partVals", + (TType.STRING, "UTF8", False), + None, + ), # 4 + ( + 5, + TType.STRUCT, + "newPart", + [Partition, None], + None, + ), # 5 + ( + 6, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I64, + "txnId", + None, + None, + ), # 7 + ( + 8, + TType.BOOL, + "clonePart", + None, + None, + ), # 8 +) +all_structs.append(RenamePartitionResponse) +RenamePartitionResponse.thrift_spec = () +all_structs.append(AlterTableRequest) +AlterTableRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRUCT, + "table", + [Table, None], + None, + ), # 4 + ( + 5, + TType.STRUCT, + "environmentContext", + [EnvironmentContext, None], + None, + ), # 5 + ( + 6, + TType.I64, + "writeId", + None, + -1, + ), # 6 + ( + 7, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 7 + ( + 8, + TType.LIST, + "processorCapabilities", + (TType.STRING, "UTF8", False), + None, + ), # 8 + ( + 9, + TType.STRING, + "processorIdentifier", + "UTF8", + None, + ), # 9 +) +all_structs.append(AlterTableResponse) +AlterTableResponse.thrift_spec = () +all_structs.append(GetPartitionsFilterSpec) +GetPartitionsFilterSpec.thrift_spec = ( + None, # 0 + None, # 1 + None, # 2 + None, # 3 + None, # 4 + None, # 5 + None, # 6 + ( + 7, + TType.I32, + "filterMode", + None, + None, + ), # 7 + ( + 8, + TType.LIST, + "filters", + (TType.STRING, "UTF8", False), + None, + ), # 8 +) +all_structs.append(GetPartitionsResponse) +GetPartitionsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "partitionSpec", + (TType.STRUCT, [PartitionSpec, None], False), + None, + ), # 1 +) +all_structs.append(GetPartitionsRequest) +GetPartitionsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.BOOL, + "withAuth", + None, + None, + ), # 4 + ( + 5, + TType.STRING, + "user", + "UTF8", + None, + ), # 5 + ( + 6, + TType.LIST, + "groupNames", + (TType.STRING, "UTF8", False), + None, + ), # 6 + ( + 7, + TType.STRUCT, + "projectionSpec", + [GetProjectionsSpec, None], + None, + ), # 7 + ( + 8, + TType.STRUCT, + "filterSpec", + [GetPartitionsFilterSpec, None], + None, + ), # 8 + ( + 9, + TType.LIST, + "processorCapabilities", + (TType.STRING, "UTF8", False), + None, + ), # 9 + ( + 10, + TType.STRING, + "processorIdentifier", + "UTF8", + None, + ), # 10 + ( + 11, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 11 +) +all_structs.append(GetFieldsRequest) +GetFieldsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRUCT, + "envContext", + [EnvironmentContext, None], + None, + ), # 4 + ( + 5, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 5 + ( + 6, + TType.I64, + "id", + None, + -1, + ), # 6 +) +all_structs.append(GetFieldsResponse) +GetFieldsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "fields", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 1 +) +all_structs.append(GetSchemaRequest) +GetSchemaRequest.thrift_spec = ( + None, # 0 + ( + 1, + 
TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRUCT, + "envContext", + [EnvironmentContext, None], + None, + ), # 4 + ( + 5, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 5 + ( + 6, + TType.I64, + "id", + None, + -1, + ), # 6 +) +all_structs.append(GetSchemaResponse) +GetSchemaResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "fields", + (TType.STRUCT, [FieldSchema, None], False), + None, + ), # 1 +) +all_structs.append(GetPartitionRequest) +GetPartitionRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.LIST, + "partVals", + (TType.STRING, "UTF8", False), + None, + ), # 4 + ( + 5, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 5 + ( + 6, + TType.I64, + "id", + None, + -1, + ), # 6 +) +all_structs.append(GetPartitionResponse) +GetPartitionResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRUCT, + "partition", + [Partition, None], + None, + ), # 1 +) +all_structs.append(PartitionsRequest) +PartitionsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.I16, + "maxParts", + None, + -1, + ), # 4 + ( + 5, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 5 + ( + 6, + TType.I64, + "id", + None, + -1, + ), # 6 +) +all_structs.append(PartitionsResponse) +PartitionsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "partitions", + (TType.STRUCT, [Partition, None], False), + None, + ), # 1 +) +all_structs.append(GetPartitionNamesPsRequest) +GetPartitionNamesPsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.LIST, + "partValues", + (TType.STRING, "UTF8", False), + None, + ), # 4 + ( + 5, + TType.I16, + "maxParts", + None, + -1, + ), # 5 + ( + 6, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 6 + ( + 7, + TType.I64, + "id", + None, + -1, + ), # 7 +) +all_structs.append(GetPartitionNamesPsResponse) +GetPartitionNamesPsResponse.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "names", + (TType.STRING, "UTF8", False), + None, + ), # 1 +) +all_structs.append(GetPartitionsPsWithAuthRequest) +GetPartitionsPsWithAuthRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tblName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.LIST, + "partVals", + (TType.STRING, "UTF8", False), + None, + ), # 4 + ( + 5, + TType.I16, + "maxParts", + None, + -1, + ), # 5 + ( + 6, + TType.STRING, + "userName", + "UTF8", + None, + ), # 6 + ( + 7, + TType.LIST, + "groupNames", + (TType.STRING, "UTF8", False), + None, + ), # 7 + ( + 8, + TType.STRING, + "validWriteIdList", + "UTF8", + None, + ), # 8 + ( + 9, + TType.I64, + "id", + None, + -1, + ), # 9 +) +all_structs.append(GetPartitionsPsWithAuthResponse) +GetPartitionsPsWithAuthResponse.thrift_spec = ( + None, # 0 + ( + 1, + 
TType.LIST, + "partitions", + (TType.STRUCT, [Partition, None], False), + None, + ), # 1 +) +all_structs.append(ReplicationMetrics) +ReplicationMetrics.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "scheduledExecutionId", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "policy", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I64, + "dumpExecutionId", + None, + None, + ), # 3 + ( + 4, + TType.STRING, + "metadata", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "progress", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "messageFormat", + "UTF8", + None, + ), # 6 +) +all_structs.append(ReplicationMetricList) +ReplicationMetricList.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "replicationMetricList", + (TType.STRUCT, [ReplicationMetrics, None], False), + None, + ), # 1 +) +all_structs.append(GetReplicationMetricsRequest) +GetReplicationMetricsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "scheduledExecutionId", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "policy", + "UTF8", + None, + ), # 2 + ( + 3, + TType.I64, + "dumpExecutionId", + None, + None, + ), # 3 +) +all_structs.append(GetOpenTxnsRequest) +GetOpenTxnsRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.LIST, + "excludeTxnTypes", + (TType.I32, None, False), + None, + ), # 1 +) +all_structs.append(StoredProcedureRequest) +StoredProcedureRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "procName", + "UTF8", + None, + ), # 3 +) +all_structs.append(ListStoredProcedureRequest) +ListStoredProcedureRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 +) +all_structs.append(StoredProcedure) +StoredProcedure.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "name", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "catName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "ownerName", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "source", + "UTF8", + None, + ), # 5 +) +all_structs.append(AddPackageRequest) +AddPackageRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "packageName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "ownerName", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "header", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "body", + "UTF8", + None, + ), # 6 +) +all_structs.append(GetPackageRequest) +GetPackageRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "packageName", + "UTF8", + None, + ), # 3 +) +all_structs.append(DropPackageRequest) +DropPackageRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "packageName", + "UTF8", + None, + ), # 3 +) +all_structs.append(ListPackageRequest) +ListPackageRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 +) +all_structs.append(Package) 
+Package.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "catName", + "UTF8", + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "packageName", + "UTF8", + None, + ), # 3 + ( + 4, + TType.STRING, + "ownerName", + "UTF8", + None, + ), # 4 + ( + 5, + TType.STRING, + "header", + "UTF8", + None, + ), # 5 + ( + 6, + TType.STRING, + "body", + "UTF8", + None, + ), # 6 +) +all_structs.append(GetAllWriteEventInfoRequest) +GetAllWriteEventInfoRequest.thrift_spec = ( + None, # 0 + ( + 1, + TType.I64, + "txnId", + None, + None, + ), # 1 + ( + 2, + TType.STRING, + "dbName", + "UTF8", + None, + ), # 2 + ( + 3, + TType.STRING, + "tableName", + "UTF8", + None, + ), # 3 +) +all_structs.append(MetaException) +MetaException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(UnknownTableException) +UnknownTableException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(UnknownDBException) +UnknownDBException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(AlreadyExistsException) +AlreadyExistsException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(InvalidPartitionException) +InvalidPartitionException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(UnknownPartitionException) +UnknownPartitionException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(InvalidObjectException) +InvalidObjectException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(NoSuchObjectException) +NoSuchObjectException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(InvalidOperationException) +InvalidOperationException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(ConfigValSecurityException) +ConfigValSecurityException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(InvalidInputException) +InvalidInputException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(NoSuchTxnException) +NoSuchTxnException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(TxnAbortedException) +TxnAbortedException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(TxnOpenException) +TxnOpenException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +all_structs.append(NoSuchLockException) +NoSuchLockException.thrift_spec = ( + None, # 0 + ( + 1, + TType.STRING, + "message", + "UTF8", + None, + ), # 1 +) +fix_spec(all_structs) +del all_structs
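(Editor's note on the closing two statements, a sketch of the runtime behavior rather than text from the patch: the generated module accumulates every struct class in all_structs and then calls fix_spec, which the Apache Thrift Python runtime provides in thrift.TRecursive. Struct-typed fields are emitted as [StructClass, None], and fix_spec replaces that trailing None with the class's own thrift_spec, which is how forward and mutually recursive references resolve; all_structs is then deleted to keep the module namespace clean. The resolver below is a simplified stand-in for that pass, assuming only direct struct fields, whereas the real one also recurses into container type_args; fix_spec_sketch, Txn, and LockReq are hypothetical names for the demo.)

def fix_spec_sketch(structs):
    # Fill the None placeholder in [StructClass, None] with that class's
    # thrift_spec so nested struct fields can be (de)serialized.
    for cls in structs:
        for entry in cls.thrift_spec:
            if entry is None:  # slot 0, or a gap in field ids
                continue
            type_args = entry[3]
            if isinstance(type_args, list) and type_args[1] is None:
                type_args[1] = type_args[0].thrift_spec

class Txn(object):  # hypothetical struct for the demo
    thrift_spec = (
        None,                          # 0 - field ids start at 1
        (1, 10, "txnId", None, None),  # 1 - i64 (10 = TType.I64)
    )

class LockReq(object):  # references Txn before resolution
    thrift_spec = (
        None,                               # 0
        (1, 12, "txn", [Txn, None], None),  # 1 - struct (12 = TType.STRUCT)
    )

fix_spec_sketch([Txn, LockReq])
assert LockReq.thrift_spec[1][3][1] is Txn.thrift_spec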