diff --git a/connector_importer/README.rst b/connector_importer/README.rst
new file mode 100644
index 000000000..fa56a7b8e
--- /dev/null
+++ b/connector_importer/README.rst
@@ -0,0 +1,379 @@
+==================
+Connector Importer
+==================
+
+..
+   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+   !! This file is generated by oca-gen-addon-readme !!
+   !! changes will be overwritten.                   !!
+   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+   !! source digest: sha256:7680b9cbb3329f4f4588d69d8487b9c03cae24d89c215adb8246c96b06f950fd
+   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+.. |badge1| image:: https://img.shields.io/badge/maturity-Beta-yellow.png
+    :target: https://odoo-community.org/page/development-status
+    :alt: Beta
+.. |badge2| image:: https://img.shields.io/badge/licence-AGPL--3-blue.png
+    :target: http://www.gnu.org/licenses/agpl-3.0-standalone.html
+    :alt: License: AGPL-3
+.. |badge3| image:: https://img.shields.io/badge/github-OCA%2Fconnector--interfaces-lightgray.png?logo=github
+    :target: https://github.com/OCA/connector-interfaces/tree/18.0/connector_importer
+    :alt: OCA/connector-interfaces
+.. |badge4| image:: https://img.shields.io/badge/weblate-Translate%20me-F47D42.png
+    :target: https://translation.odoo-community.org/projects/connector-interfaces-18-0/connector-interfaces-18-0-connector_importer
+    :alt: Translate me on Weblate
+.. |badge5| image:: https://img.shields.io/badge/runboat-Try%20me-875A7B.png
+    :target: https://runboat.odoo-community.org/builds?repo=OCA/connector-interfaces&target_branch=18.0
+    :alt: Try me on Runboat
+
+|badge1| |badge2| |badge3| |badge4| |badge5|
+
+This module allows you to import / update records from files using the
+connector framework and the job queue.
+
+To run an import you need at least:
+
+- a backend, which hosts the global configuration of the import
+- a recordset, which hosts the configuration of the import for a
+  specific model and source
+- a source, which provides the data to import
+- an import type, which describes which models you want to import and
+  how to import them
+
+**Table of contents**
+
+.. contents::
+   :local:
+
+Configuration
+=============
+
+Import type
+-----------
+
+Import types are the main configuration of the import. They describe
+which models you want to import and how to import them.
+
+Example of configuration (an import type with the name "Import Product
+- all in one", the key ``product_product_all_in_one`` and the
+following options):
+
+::
+
+    - model: product.product
+      options:
+        importer:
+          odoo_unique_key: barcode
+        mapper:
+          name: product.product.mapper
+
+    - model: res.partner
+      options:
+        importer:
+          odoo_unique_key: name
+          override_existing: false
+        mapper:
+          name: importer.mapper.dynamic
+          source_key_prefix: supplier.
+          source_key_whitelist: supplier.name
+          default_keys:
+            supplier_rank: 1
+
+    - model: product.supplierinfo
+      options:
+        importer:
+          odoo_unique_key: name
+        mapper:
+          name: product.supplierinfo.mapper
+          source_key_prefix: supplier.
+
+In this example we have 3 models to import, one after the other, using
+the same source file:
+
+- product.product
+- res.partner
+- product.supplierinfo
+
+The import will run in the order of the configuration: first
+product.product, then res.partner and finally product.supplierinfo. For
+each model we have a configuration that describes how to import the
+data. With the ``options`` key we can define the configuration of the
+import for each component: ``importer``, ``mapper``, ``record_handler``,
+``tracking_handler``.
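+
+For instance, a single model entry can carry options for every
+component. A minimal sketch (values are illustrative; only the shape
+of the configuration matters here):
+
+::
+
+    - model: res.partner
+      options:
+        importer:
+          odoo_unique_key: name
+        mapper:
+          name: importer.mapper.dynamic
+        record_handler:
+          match_domain: "[('name', '=', values.get('name'))]"
+        tracking_handler: {}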
+
+There are 4 main components in the import configuration:
+
+- importer
+- mapper
+- record_handler
+- tracking_handler
+
+Each of them is responsible for a specific part of the import.
+
+The importer
+------------
+
+``importer`` is the main component that will import the data. It will
+use the ``mapper`` to map the data from the source to the destination
+model. If no ``name`` is defined, the importer will use the default
+importer for the model, which is capable of importing any model. Most
+of the time you don't need a specific importer.
+
+As the importer is the main component of the import, if you want to
+customize it you'll have to declare it at a higher level, next to the
+``options`` key:
+
+::
+
+    - model: product.product
+      importer:
+        name: product.product.importer
+      options:
+        mapper:
+          name: product.product.mapper
+
+The importer accepts the following options:
+
+- ``odoo_unique_key``: the field that will be used to find the record
+  in Odoo. If the record is found it will be updated, otherwise it will
+  be created.
+
+  NOTE: the value in the column declared as ``odoo_unique_key`` will
+  be treated as an external ID (xid) only if the name of the column is
+  ``id`` or if it starts with ``xid::``.
+
+- ``break_on_error``: if set to True the import will stop if an error
+  occurs. Default is False.
+
+- ``override_existing``: if set to True the existing records will be
+  updated. Default is True.
+
+- ``translation_key_sep``: the separator used to split the translation
+  key. Default is ``:``. See below for information about translation
+  keys.
+
+- ``translation_use_regional_lang``: if set to True the importer will
+  use the regional language (eg: fr_CH instead of fr).
+
+- ``ctx``: a dictionary of values to inject in the context of the
+  import.
+
+- ``write_only``: if set to True the importer will not create new
+  records, it will only update existing ones. Default is False.
+
+The mapper
+----------
+
+The mapper is the component that will map the data from the source to
+the destination model.
+
+The most flexible mapper is ``importer.mapper.dynamic``, which will
+map the data based on model introspection and some options that you
+can define. The dynamic mapper accepts the following options:
+
+- ``name``: the name of the mapper to use. If no name is defined the
+  default mapper for the model will be used.
+- ``source_key_prefix``: a prefix to add to the source key. This is
+  useful when you want to map the same source key to different
+  destination fields.
+- ``source_key_whitelist``: a list of source keys to import. If not
+  defined all the keys will be imported.
+- ``source_key_blacklist``: a list of source keys to exclude from the
+  import.
+- ``source_key_rename``: a dictionary of source keys to rename. The key
+  is the source key and the value is the new key.
+- ``default_keys``: a dictionary of default values to set on the
+  destination record. The key is the field name and the value is the
+  default value.
+- ``translation_keys``: a list of keys that will be used to translate
+  the data. See below for information about translation keys.
+- ``required_keys``: a list of keys that are required. If one of the
+  keys is missing the record will be skipped.
+
+Please refer to the documentation of the mapper to see advanced
+options.
+
+Considering the example above:
+
+::
+
+    - model: product.product
+      options:
+        mapper:
+          name: importer.mapper.dynamic
+          source_key_prefix: supplier.
+          source_key_whitelist: supplier.name
+          default_keys:
+            supplier_rank: 1
+
+The mapper will:
+
+- consider only the keys starting with ``supplier.``, ignoring the rest
+- among those, import only the key ``supplier.name``
+- set the default value of ``supplier_rank`` to 1
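+
+Source columns can also be renamed on the fly with
+``source_key_rename``. A minimal sketch (the ``customer_name`` column
+is a made-up example):
+
+::
+
+    - model: res.partner
+      options:
+        mapper:
+          name: importer.mapper.dynamic
+          source_key_rename:
+            customer_name: name
+
+Here the value of the source column ``customer_name`` is imported into
+the ``name`` field.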
+
+The record_handler
+------------------
+
+The record handler is the component that will handle the record
+creation or update in Odoo. This component is responsible for:
+
+- finding the record in Odoo
+- creating the record if not found
+- updating the record if found
+- handling the translations
+
+If no ``name`` is defined the importer will use the default record
+handler for the model, which is capable of handling any model. If you
+want to customize the record handler you'll have to declare it at a
+higher level, next to the ``options`` key:
+
+::
+
+    - model: product.product
+      options:
+        record_handler:
+          name: product.product.record_handler
+
+To find the record in Odoo the record handler will use the
+``odoo_unique_key`` if defined in the importer, otherwise it will
+fall back to the matching domain. See below.
+
+The record handler accepts the following options:
+
+- ``name``: the name of the record handler to use. If no name is
+  defined the default record handler for the model will be used.
+
+- ``match_domain``: a domain to match the record in Odoo. When no
+  ``odoo_unique_key`` is provided by the importer you must provide a
+  ``match_domain``.
+
+  This key accepts a snippet returning a domain. The snippet will be
+  evaluated in the context of the import and will receive:
+
+  - ``orig_values``: the values from the source
+  - ``values``: the values computed by the mapper for the record
+  - ``env``
+  - ``user``
+  - ``datetime``
+  - ``dateutil``
+  - ``time``
+  - ``ref_id``: a function to get a record ID from a reference
+  - ``ref``: a function to get a record from a reference
+
+  Example:
+
+  ::
+
+      match_domain: |
+        [('name', '=', values.get('name'))]
+
+- ``must_generate_xmlid``: if set to True the importer will generate an
+  XML ID for the record. Default is True if the unique key is an xmlid.
+
+- ``skip_fields_unchanged``: if set to True the importer will skip the
+  fields that are unchanged. Default is False.
+
+Translations
+------------
+
+The importer can translate the data using the translation keys. The
+translation keys are a list of keys (columns) that will be handled as
+translatable. Whenever a key is found in the translation keys, the
+importer will look for a column with the same name suffixed by the
+language code (eg: ``name:fr_CH``). If the column is found, the
+importer will translate the data using the language code as context.
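+
+A minimal sketch (column and model names are illustrative): given a
+source file with the columns ``name`` and ``name:fr_FR``, declaring
+``name`` as a translation key makes the importer write the French
+value with ``fr_FR`` as the language in the context:
+
+::
+
+    - model: res.partner
+      options:
+        mapper:
+          name: importer.mapper.dynamic
+          translation_keys:
+            - name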
+
+Known issues / Roadmap
+======================
+
+- with the import of standard Odoo CSV files, a concurrency error
+  occurs when updating the report_data of the import_recordset table
+  (from the importer: ``self._do_report()`` ->
+  ``self.recordset.set_report(...)``). The job is automatically retried
+  a second time (without concurrency errors). For small files it's not
+  a big issue, but for files with a huge amount of lines it takes time
+  to process them twice.
+- move generic functions from utils.mapper_utils to the connector
+  module
+- unit tests for record handler and tracker
+- add more test coverage for mapper utils and dynamic mapper
+- consider making the dynamic mapper the default one
+- control how to generate the xid (eg: from a specific field with a
+  key such as must_generate_xmlid_from_key)
+- add manual control for backend_to_rel mappers
+- refactor source to be a specific m2o to ease management instead of a
+  generic relation
+
+Bug Tracker
+===========
+
+Bugs are tracked on `GitHub Issues <https://github.com/OCA/connector-interfaces/issues>`_.
+In case of trouble, please check there if your issue has already been reported.
+If you spotted it first, help us to smash it by providing a detailed and welcomed
+`feedback <https://github.com/OCA/connector-interfaces/issues/new>`_.
+
+Do not contact contributors directly about support or help with technical issues.
+
+Credits
+=======
+
+Authors
+-------
+
+* Camptocamp
+
+Contributors
+------------
+
+Simone Orsi (Camptocamp) for the original implementation.
+
+Other contributors include:
+
+- Guewen Baconnier (Camptocamp)
+- Mykhailo Panarin (Camptocamp)
+- Sébastien Alix (Camptocamp)
+- Thien Vo (Trobz)
+
+Other credits
+-------------
+
+The migration of this module from 16.0 to 18.0 was financially supported
+by Camptocamp.
+
+Maintainers
+-----------
+
+This module is maintained by the OCA.
+
+.. image:: https://odoo-community.org/logo.png
+   :alt: Odoo Community Association
+   :target: https://odoo-community.org
+
+OCA, or the Odoo Community Association, is a nonprofit organization whose
+mission is to support the collaborative development of Odoo features and
+promote its widespread use.
+
+.. |maintainer-simahawk| image:: https://github.com/simahawk.png?size=40px
+    :target: https://github.com/simahawk
+    :alt: simahawk
+
+Current `maintainer <https://odoo-community.org/page/maintainer-role>`__:
+
+|maintainer-simahawk|
+
+This module is part of the `OCA/connector-interfaces <https://github.com/OCA/connector-interfaces/tree/18.0/connector_importer>`_ project on GitHub.
+
+You are welcome to contribute. To learn how please visit https://odoo-community.org/page/Contribute.
diff --git a/connector_importer/__init__.py b/connector_importer/__init__.py
new file mode 100644
index 000000000..5f713a85c
--- /dev/null
+++ b/connector_importer/__init__.py
@@ -0,0 +1,3 @@
+from . import models
+from . import components
+from . import controllers
diff --git a/connector_importer/__manifest__.py b/connector_importer/__manifest__.py
new file mode 100644
index 000000000..b0fd7d9ba
--- /dev/null
+++ b/connector_importer/__manifest__.py
@@ -0,0 +1,30 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+ +{ + "name": "Connector Importer", + "summary": """This module takes care of import sessions.""", + "version": "18.0.1.0.0", + "depends": ["connector", "queue_job"], + "author": "Camptocamp, Odoo Community Association (OCA)", + "license": "AGPL-3", + "category": "Connector", + "website": "https://github.com/OCA/connector-interfaces", + "maintainers": ["simahawk"], + "data": [ + "data/ir_cron.xml", + "data/queue_job_function_data.xml", + "security/security.xml", + "security/ir.model.access.csv", + "views/backend_views.xml", + "views/recordset_views.xml", + "views/import_type_views.xml", + "views/source_views.xml", + "views/report_template.xml", + "views/docs_template.xml", + "views/source_config_template.xml", + "menuitems.xml", + ], + "external_dependencies": {"python": ["chardet", "pytz", "pyyaml"]}, +} diff --git a/connector_importer/components/__init__.py b/connector_importer/components/__init__.py new file mode 100644 index 000000000..da0aecae2 --- /dev/null +++ b/connector_importer/components/__init__.py @@ -0,0 +1,10 @@ +from . import base +from . import tracker +from . import odoorecord +from . import odoorecord_csv_std +from . import importer +from . import importer_csv_std +from . import mapper +from . import automapper +from . import dynamicmapper +from . import listeners diff --git a/connector_importer/components/automapper.py b/connector_importer/components/automapper.py new file mode 100644 index 000000000..3bc99c89f --- /dev/null +++ b/connector_importer/components/automapper.py @@ -0,0 +1,17 @@ +# Copyright 2019 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl) + +from odoo.addons.component.core import Component +from odoo.addons.connector.components.mapper import mapping + + +class AutoMapper(Component): + _name = "importer.mapper.auto" + _inherit = "importer.base.mapper" + _usage = "importer.automapper" + + @mapping + def auto_mapping(self, record): + """Generate the values automatically by removing internal keys.""" + result = {k: v for k, v in record.items() if not k.startswith("_")} + return result diff --git a/connector_importer/components/base.py b/connector_importer/components/base.py new file mode 100644 index 000000000..1f61fff06 --- /dev/null +++ b/connector_importer/components/base.py @@ -0,0 +1,11 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +from odoo.addons.component.core import AbstractComponent + + +class ImporterComponent(AbstractComponent): + _name = "importer.base.component" + _inherit = "base.connector" + _collection = "import.backend" diff --git a/connector_importer/components/dynamicmapper.py b/connector_importer/components/dynamicmapper.py new file mode 100644 index 000000000..223a9c825 --- /dev/null +++ b/connector_importer/components/dynamicmapper.py @@ -0,0 +1,220 @@ +# Copyright 2019 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl) + +from odoo.addons.component.core import Component +from odoo.addons.connector.components.mapper import mapping + +from ..log import logger +from ..utils.mapper_utils import backend_to_rel, convert, xmlid_to_rel + + +class DynamicMapper(Component): + """A mapper that dynamically converts input data to odoo fields values. + + The behavior is affected by the options provided to the mapper work ctx. 
+    Normally these options are provided by the importer component
+    that will load them from the import type yaml conf:
+
+        options:
+          mapper:
+            source_key_whitelist: []
+            source_key_blacklist: []
+            source_key_empty_skip: []
+            source_key_prefix: ""
+            source_key_rename: {}
+            converter: {}
+
+    `source_key_whitelist` and `source_key_blacklist` are used to filter the keys.
+    `source_key_empty_skip` is used to skip keys when
+    they are empty or no value is computed for them.
+    `source_key_prefix` is used to consider only keys that start with the given prefix.
+
+    It's a sort of whitelist, but it allows filtering keys dynamically,
+    which is very handy when importing more than one model per import type.
+
+    `source_key_rename` is used to rename source keys to
+    destination keys (the real odoo fields).
+    `converter` is used to define custom converter options for specific fields.
+
+    The value must be a dict containing the params to propagate to
+    the converter function.
+    Eg: for a m2o field `partner_id` that needs to be converted to a
+    res.partner record
+    the converter option could be:
+
+        converter:
+          partner_id:
+            create_missing: true
+            search_field: "ref"
+
+    The options are in fact the args that the converter functions accept.
+    Have a look at the `convert` function in `mapper_utils.py` for more details.
+    """
+
+    _name = "importer.mapper.dynamic"
+    _inherit = "importer.base.mapper"
+    _usage = "importer.dynamicmapper"
+
+    @mapping
+    def dynamic_fields(self, record):
+        """Resolve values for non mapped keys.
+
+        :param record: a dictionary of key/value pairs coming from the source data
+            already prepared by the importer.
+        """
+        # TODO: add tests!
+        model = self.work.model_name
+        vals = {}
+        available_fields = self.env[model].fields_get()
+        prefix = self._source_key_prefix
+        clean_record = self._clean_record(record)
+        required_keys = self._required_keys()
+        missing_required_keys = []
+        for source_fname in self._non_mapped_keys(clean_record):
+            if source_fname in ("id", "xid::id"):
+                # Never convert IDs
+                continue
+            fname = source_fname
+            if "::" in fname:
+                # Eg: transformers like `xid::`
+                fname = fname.split("::")[-1]
+                clean_record[fname] = clean_record.pop(source_fname)
+            if prefix and fname.startswith(prefix):
+                # Eg: prefix all supplier fields w/ `supplier.`
+                fname = fname[len(prefix) :]
+                clean_record[fname] = clean_record.pop(prefix + fname)
+            final_fname = self._get_field_name(fname, clean_record)
+            if final_fname != fname:
+                clean_record[final_fname] = clean_record.pop(fname)
+                fname = final_fname
+
+            if available_fields.get(fname):
+                fspec = available_fields.get(fname)
+                ftype = fspec["type"]
+                if self._is_xmlid_key(source_fname, ftype):
+                    ftype = "_xmlid"
+                converter = self._get_converter(fname, ftype)
+                if converter:
+                    value = converter(self, clean_record, fname)
+                    if not value:
+                        if source_fname in self._source_key_empty_skip:
+                            continue
+                        if fname in required_keys:
+                            missing_required_keys.append(fname)
+                    vals[fname] = value
+                else:
+                    logger.debug(
+                        "Dynamic mapper cannot find a converter for field `%s`", fname
+                    )
+        if missing_required_keys:
+            vals.update(self._get_defaults(missing_required_keys))
+            for k in missing_required_keys:
+                if k in vals and not vals[k]:
+                    # Discard empty values for required keys.
+                    # Avoids overriding values that might be already set
+                    # and that cannot be emptied.
+                    vals.pop(k)
+        return vals
+
+    def _clean_record(self, record):
+        valid_keys = self._get_valid_keys(record)
+        return {k: v for k, v in record.items() if k in valid_keys}
+
+    def _get_valid_keys(self, record):
+        valid_keys = [k for k in record.keys() if not k.startswith("_")]
+        prefix = self._source_key_prefix
+        if prefix:
+            valid_keys = [k for k in valid_keys if prefix in k]
+        whitelist = self._source_key_whitelist
+        if whitelist:
+            valid_keys = [k for k in valid_keys if k in whitelist]
+        blacklist = self._source_key_blacklist
+        if blacklist:
+            valid_keys = [k for k in valid_keys if k not in blacklist]
+        return tuple(valid_keys)
+
+    def _required_keys(self):
+        return [k for k, v in self.model.fields_get().items() if v["required"]]
+
+    @property
+    def _source_key_whitelist(self):
+        return self.work.options.mapper.get("source_key_whitelist", [])
+
+    @property
+    def _source_key_blacklist(self):
+        return self.work.options.mapper.get("source_key_blacklist", [])
+
+    @property
+    def _source_key_empty_skip(self):
+        """List of source keys to skip when empty.
+
+        Use cases:
+
+        * field w/ unique constraint but not populated (eg: product barcode)
+        * field not to override when empty
+        """
+        return self.work.options.mapper.get("source_key_empty_skip", [])
+
+    @property
+    def _source_key_prefix(self):
+        return self.work.options.mapper.get("source_key_prefix", "")
+
+    @property
+    def _source_key_rename(self):
+        return self.work.options.mapper.get("source_key_rename", {})
+
+    def _get_field_name(self, fname, clean_record):
+        """Return the final field name.
+
+        Field names can be manipulated via the mapper option `source_key_rename`,
+        which must be a dictionary w/ source name -> destination name.
+        """
+        return self._source_key_rename.get(fname, fname)
+
+    def _is_xmlid_key(self, fname, ftype):
+        return fname.startswith("xid::") and ftype in (
+            "many2one",
+            "one2many",
+            "many2many",
+        )
+
+    def _dynamic_keys_mapping(self, fname, **options):
+        return {
+            "char": lambda self, rec, fname: rec[fname],
+            "text": lambda self, rec, fname: rec[fname],
+            "selection": lambda self, rec, fname: rec[fname],
+            "integer": convert(fname, "safe_int", **options),
+            "float": convert(fname, "safe_float", **options),
+            "boolean": convert(fname, "bool", **options),
+            "date": convert(fname, "date", **options),
+            "datetime": convert(fname, "utc_date", **options),
+            "many2one": backend_to_rel(fname, **options),
+            "many2many": backend_to_rel(fname, **options),
+            "one2many": backend_to_rel(fname, **options),
+            "_xmlid": xmlid_to_rel(fname, **options),
+        }
+
+    def _get_converter(self, fname, ftype):
+        options = self.work.options.mapper.get("converter", {}).get(fname, {})
+        return self._dynamic_keys_mapping(fname, **options).get(ftype)
+
+    _non_mapped_keys_cache = None
+
+    def _non_mapped_keys(self, record):
+        if self._non_mapped_keys_cache is None:
+            all_keys = set(record.keys())
+            mapped_keys = set()
+            # NOTE: keys coming from `@mapping` methods can't be tracked.
+            # Worst case: they get computed twice.
+            # TODO: make sure `dynamic_fields` runs at the end
+            # or move it to `finalize`
+            for pair in self.direct:
+                if isinstance(pair[0], str):
+                    mapped_keys.add(pair[0])
+                elif hasattr(pair[0], "_from_key"):
+                    mapped_keys.add(pair[0]._from_key)
+            self._non_mapped_keys_cache = tuple(all_keys - mapped_keys)
+        return self._non_mapped_keys_cache
+
+    def _get_defaults(self, fnames):
+        return self.model.default_get(fnames)
diff --git a/connector_importer/components/importer.py b/connector_importer/components/importer.py
new file mode 100644
index 000000000..1f5895f7b
--- /dev/null
+++ b/connector_importer/components/importer.py
@@ -0,0 +1,422 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+from odoo import _, exceptions
+
+from odoo.addons.component.core import Component
+
+from ..log import LOGGER_NAME, logger
+
+
+class RecordSetImporter(Component):
+    """Importer for recordsets."""
+
+    _name = "importer.recordset"
+    _inherit = "importer.base.component"
+    _usage = "recordset.importer"
+    _apply_on = "import.recordset"
+
+    def run(self, recordset, **kw):
+        """Run recordset job.
+
+        Steps:
+
+        * update the last start date on the recordset
+        * read the source
+        * process all source lines in chunks
+        * create an import record for each chunk
+        * schedule an import for each record
+        """
+        # reset recordset
+        recordset._prepare_for_import_session()
+        msg = f"START RECORDSET {recordset.name} ({recordset.id})"
+        logger.info(msg)
+        # flush existing records as we are going to re-create them
+        source = recordset.get_source()
+        if not source:
+            raise exceptions.UserError(
+                _("No source configured on recordset '%s'") % recordset.name
+            )
+        for chunk in source.get_lines():
+            # create chunked records and run their imports
+            record = self.env["import.record"].create({"recordset_id": recordset.id})
+            # store data
+            record.set_data(chunk)
+            record.run_import()
+
+
+class RecordImporter(Component):
+    """Importer for records.
+
+    This importer is actually the one that does the real import work.
+    It loads each import record, tries to import it,
+    and keeps track of errored, skipped, etc. items.
+    See the `run` method for detailed information on what it does.
+    """
+
+    _name = "importer.record"
+    _inherit = ["importer.base.component"]
+    _usage = "record.importer"
+    # log and report errors
+    # do not make the whole import fail
+    _break_on_error = False
+    _record_handler_usage = "odoorecord.handler"
+    _tracking_handler_usage = "tracking.handler"
+    # a unique key (field name) to retrieve the odoo record
+    # if this key is an external/XML ID, prefix the name with `xid::` (eg: xid::id)
+    odoo_unique_key = ""
+
+    def _init_importer(self, recordset):
+        self.recordset = recordset
+        # the record handler is responsible for create/write on odoo records
+        self.record_handler = self.component(usage=self._record_handler_usage)
+        self.record_handler._init_handler(
+            importer=self,
+            unique_key=self.unique_key,
+        )
+        # the tracking handler is responsible for logging and chunk reports
+        self.tracker = self.component(usage=self._tracking_handler_usage)
+        self.tracker._init_handler(
+            model_name=self.model._name,
+            logger_name=LOGGER_NAME,
+            log_prefix=self.recordset.import_type_id.key + " ",
+        )
+
+    @property
+    def unique_key(self):
+        return self.work.options.importer.get("odoo_unique_key", self.odoo_unique_key)
+
+    @property
+    def unique_key_is_xmlid(self):
+        return self.unique_key.startswith("xid::") or self.unique_key == "id"
+
+    # Override to not rely on automatic mapper lookup.
+ # This is especially needed if you register more than one importer + # for a given odoo model. Eg: 2 importers for res.partner + # (1 for customers and 1 for suppliers) + _mapper_name = None + _mapper_usage = "importer.mapper" + # just an instance cache for the mapper + _mapper = None + + # TODO: do the same for record handler and tracking handler + def _get_mapper(self): + mapper_name = self.work.options.mapper.get("name", self._mapper_name) + if mapper_name: + return self.component_by_name(mapper_name) + mapper_usage = self.work.options.mapper.get("usage", self._mapper_usage) + return self.component(usage=mapper_usage) + + @property + def mapper(self): + if not self._mapper: + self._mapper = self._get_mapper() + return self._mapper + + @property + def must_break_on_error(self): + return self.work.options.importer.get("break_on_error", self._break_on_error) + + @property + def must_override_existing(self): + return self.work.options.importer.get( + "override_existing", self.recordset.override_existing + ) + + def required_keys(self, create=False): + """Keys that are mandatory to import a line.""" + req = self.mapper.required_keys() + all_values = [] + for k, v in req.items(): + # make sure values are always tuples + # as we support multiple dest keys + if not isinstance(v, tuple | list): + req[k] = (v,) + all_values.extend(req[k]) + unique_key = self.unique_key + if ( + unique_key + and unique_key not in list(req.keys()) + and unique_key not in all_values + ): + # this one is REALLY required :) + req[unique_key] = (unique_key,) + return req + + # mostly for auto-documentation in UI + def default_values(self): + """Values that are automatically assigned.""" + return self.mapper.default_values() + + def translatable_keys(self, create=False): + """Keys that are translatable.""" + return self.mapper.translatable_keys() + + def translatable_langs(self): + return self.env["res.lang"].search([("active", "=", True)]).mapped("code") + + def make_translation_key(self, key, lang): + sep = self.work.options.importer.get("translation_key_sep", ":") + regional_lang = self.work.options.importer.get( + "translation_use_regional_lang", False + ) + if not regional_lang: + lang = lang[:2] # eg: "de_DE" -> "de" + return f"{key}{sep}{lang}" + + def collect_translatable(self, values, orig_values): + """Get translations values for `mapper.translatable_keys`. + + We assume that the source contains translatable columns in the form: + + `mapper_key:lang` + + whereas `mapper_key` is an odoo record field to translate + and lang matches one of the installed languages. + + Translatable keys must be declared on the mapper + within the attribute `translatable`. 
+        """
+        translatable = {}
+        if not self.translatable_keys():
+            return translatable
+        for lang in self.translatable_langs():
+            for key in self.translatable_keys():
+                # eg: name:fr_FR
+                tkey = self.make_translation_key(key, lang)
+                if tkey in orig_values and values.get(key):
+                    if lang not in translatable:
+                        translatable[lang] = {}
+                    # we keep only translations for existing values
+                    translatable[lang][key] = orig_values.get(tkey)
+        return translatable
+
+    def _check_missing(self, source_key, dest_key, values, orig_values):
+        """Check for missing required keys."""
+        missing = (
+            not source_key.startswith("__") and orig_values.get(source_key) is None
+        )
+        unique_key = self.unique_key
+        if missing:
+            msg = f"MISSING REQUIRED SOURCE KEY={source_key}"
+            if unique_key and values.get(unique_key):
+                msg += f": {unique_key}={values[unique_key]}"
+            return {"message": msg}
+        missing = not dest_key.startswith("__") and values.get(dest_key) is None
+        is_xmlid = dest_key == unique_key and self.unique_key_is_xmlid
+        if missing and not is_xmlid:
+            msg = f"MISSING REQUIRED DESTINATION KEY={dest_key}"
+            if unique_key and values.get(unique_key):
+                msg += f": {unique_key}={values[unique_key]}"
+            return {"message": msg}
+        return False
+
+    def skip_it(self, values, orig_values):
+        """Skip the item import conditionally... if you want ;).
+
+        You can return `False` to not skip,
+        or a dictionary containing info about the skip reason.
+        """
+        msg = ""
+        required = self.required_keys()
+        for source_key, dest_key in required.items():
+            # we support multiple destination keys
+            for _dest_key in dest_key:
+                missing = self._check_missing(
+                    source_key, _dest_key, values, orig_values
+                )
+                if missing:
+                    return missing
+
+        if (
+            self.record_handler.odoo_exists(values, orig_values)
+            and not self.must_override_existing
+        ):
+            msg = "ALREADY EXISTS"
+            if self.unique_key:
+                msg += f": {self.unique_key}={values[self.unique_key]}"
+            return {
+                "message": msg,
+                "odoo_record": self.record_handler.odoo_find(values, orig_values).id,
+            }
+        return False
+
+    def _cleanup_line(self, line):
+        """Apply basic cleanup on lines."""
+        # we cannot alter dict keys while iterating
+        res = {}
+        for k, v in line.items():
+            # skip internal tech keys if any
+            if not k.startswith("_"):
+                k = self.clean_line_key(k)
+            if isinstance(v, str):
+                v = v.strip()
+            res[k] = v
+        return res
+
+    def clean_line_key(self, key):
+        """Clean a record key.
+
+        Sometimes your CSV source does not have proper keys:
+        they can contain a lot of crap or they can change
+        lower/uppercase from one import to another.
+        You can override this method to normalize keys
+        and make your import mappers work reliably.
+        """
+        return key.strip()
+
+    def prepare_line(self, line):
+        """Pre-manipulate a line if needed.
+
+        For instance: you might want to fix some field names.
+        Sometimes in CSV you have misspelled names
+        (upper/lowercase, spaces, etc.), all chars that might break your mappers.
+
+        Here you can adapt the source line before the mapper is called,
+        so that the logic in the mapper will always be the same.
+ """ + return self._cleanup_line(line) + + def _do_report(self): + """Update recordset report using the tracker.""" + previous = self.recordset.get_report() + report = self.tracker.get_report(previous) + self.recordset.set_report({self.model._name: report}) + + def _record_lines(self): + """Get lines from import record.""" + return self.record.get_data() + + def _load_mapper_options(self): + """Retrieve mapper options.""" + return {"override_existing": self.must_override_existing} + + def _odoo_default_context(self): + """Default context to be used in both create and write methods""" + ctx = { + "importer_type_id": self.recordset.import_type_id.id, + "tracking_disable": True, + } + ctx.update(self.work.options.importer.get("ctx", {})) + return ctx + + def _odoo_create_context(self): + """Inject context variables on create, merged by odoorecord handler.""" + return self._odoo_default_context() + + def _odoo_write_context(self): + """Inject context variables on write, merged by odoorecord handler.""" + return self._odoo_default_context() + + def run(self, record, is_last_importer=True, **kw): + """Run record job. + + Steps: + + * check if record is still available + * initialize the import + * read each line to be imported + * clean them up + * manipulate them (field names fixes and such) + * retrieve a mapper and convert values + * check and skip record if needed + * if record exists: update it, else, create it + * produce a report and store it on recordset + """ + + self.record = record + if not self.record: + # maybe deleted??? + msg = "NO RECORD FOUND, maybe deleted? Check your jobs!" + logger.error(msg) + return + + self._init_importer(self.record.recordset_id) + for line in self._record_lines(): + line = self.prepare_line(line) + options = self._load_mapper_options() + + odoo_record = None + + try: + with self.env.cr.savepoint(): + values = self.mapper.map_record(line).values(**options) + logger.debug(values) + except Exception as err: + values = {} + self.tracker.log_error(values, line, odoo_record, message=err) + if self.must_break_on_error: + raise + continue + + # handle forced skipping + skip_info = self.skip_it(values, line) + if skip_info: + self.tracker.log_skipped(values, line, skip_info) + continue + + try: + with self.env.cr.savepoint(): + if self.record_handler.odoo_exists(values, line): + odoo_record = self.record_handler.odoo_write(values, line) + self.tracker.log_updated(values, line, odoo_record) + else: + if self.work.options.importer.write_only: + self.tracker.log_skipped( + values, + line, + {"message": "Write-only importer, record not found."}, + ) + continue + odoo_record = self.record_handler.odoo_create(values, line) + self.tracker.log_created(values, line, odoo_record) + except Exception as err: + self.tracker.log_error(values, line, odoo_record, message=err) + if self.must_break_on_error: + raise + continue + + # update report + self._do_report() + + # log chunk finished + counters = self.tracker.get_counters() + msg = " ".join( + [ + "CHUNK FINISHED", + "[created: {created}]", + "[updated: {updated}]", + "[skipped: {skipped}]", + "[errored: {errored}]", + ] + ).format(**counters) + self.tracker._log(msg) + self.finalize_session(record, is_last_importer=is_last_importer) + return counters + + def finalize_session(self, record, is_last_importer=False): + self._trigger_importer_events(record) + if is_last_importer: + self._trigger_finish_events(record) + + def _trigger_importer_events(self, record): + """Trigger events when the importer has done its job.""" + # 
Trigger global event for recordset + self.recordset._event( + "on_record_import_finished", collection=self.work.collection + ).notify(self, record) + # Trigger model specific event + self.model.browse()._event( + "on_record_import_finished", collection=self.work.collection + ).notify(self, record) + + def _trigger_finish_events(self, record): + """Trigger events when the importer has done its job.""" + # Trigger global event for recordset + self.recordset._event( + "on_last_record_import_finished", collection=self.work.collection + ).notify(self, record) + # Trigger model specific event + self.model.browse()._event( + "on_last_record_import_finished", collection=self.work.collection + ).notify(self, record) diff --git a/connector_importer/components/importer_csv_std.py b/connector_importer/components/importer_csv_std.py new file mode 100644 index 000000000..a43c675a9 --- /dev/null +++ b/connector_importer/components/importer_csv_std.py @@ -0,0 +1,170 @@ +# Copyright 2019 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl) + +from odoo.addons.component.core import Component + +from ..log import logger + + +class RecordImporterCSVStd(Component): + """CSV Standard importer for records. + + This importer is used to import standard CSV files, using the `load()` + method of Odoo. + """ + + _name = "importer.record.csv.std" + _inherit = ["importer.record"] + + # TODO: we should be able to set simple settings in the configuration + # so we don't have to create a new importer for it + _break_on_error = True # We want the import to stop if an error occurs + _apply_on = None + _use_xmlid = True + _record_handler_usage = "odoorecord.handler.csv" + + @property + def mapper(self): + if not self._mapper: + self._mapper = self.component(usage="importer.automapper") + return self._mapper + + def prepare_load_params(self, lines): + """Prepare the parameters for the `load()` standard method. + + It returns a list of fieldnames + the list of corresponding values. + """ + fieldnames = list(lines[0].keys()) + + data = [[line[fieldname] for fieldname in fieldnames] for line in lines] + return fieldnames, data + + def run(self, record, is_last_importer=True, **kw): # noqa: C901 + """Run record job. + + Steps: + + * for each record, check if it is already imported or not and reference + them as created or updated + * launch the import with 'load()' method + * analyse error messages returned by 'load()' and remove relevant + references from the first step + create log error for them + * produce a report and store it on recordset + """ + # noqa: C901 + self.record = record + if not self.record: + # maybe deleted??? + msg = "NO RECORD FOUND, maybe deleted? Check your jobs!" + logger.error(msg) + return msg + + self._init_importer(self.record.recordset_id) + + dataset = [] + tracker_data = { + "created": { + # line_nr: (values, line, odoo_record), + }, + "updated": { + # line_nr: (values, line, odoo_record), + }, + } + lines = self._record_lines() + # The `load` method for standard import works on the whole dataset. + # First we prepare all lines with the mapper + # (so you can still customize imported data if needed) + # and we create dataset to pass to `load`. 
+ for i, line in enumerate(lines): + line = self.prepare_line(line) + options = self._load_mapper_options() + try: + with self.env.cr.savepoint(): + values = self.mapper.map_record(line).values(**options) + logger.debug(values) + except Exception as err: + values = {} + self.tracker.log_error(values, line, message=err) + if self.must_break_on_error: + raise + continue + # Collect tracker data for later + # We store the parameters for chunk_report.track_{created,updated} + # functions, excepted the odoo_record which could not be known + # for newly created records + odoo_record_exists = self.record_handler.odoo_exists( + values, line, use_xmlid=self._use_xmlid + ) + if odoo_record_exists: + odoo_record = self.record_handler.odoo_find( + values, line, use_xmlid=self._use_xmlid + ) + tracker_data["updated"][i] = [values, line, odoo_record] + else: + tracker_data["created"][i] = [values, line] + + # handle forced skipping + skip_info = self.skip_it(values, line) + if skip_info: + self.tracker.log_skipped(values, line, skip_info) + continue + dataset.append(values) + + if dataset: + try: + with self.env.cr.savepoint(): + fieldnames, data = self.prepare_load_params(dataset) + load_res = self.model.load(fieldnames, data) + + # In case of errors `load` returns a list of messages with + # the cause and the rows range. Here we map these messages + # to tracked data and update the references to be able + # to provide a precise report. + for message in load_res["messages"]: + if message.get("rows"): + line_numbers = range( + message["rows"]["from"], message["rows"]["to"] + 1 + ) + for line_nr in line_numbers: + # First we remove the entry from tracker data + tracker_data["created"].pop(line_nr, None) + tracker_data["updated"].pop(line_nr, None) + # We add 2 as the tracker count lines starting + # from 1 + header line + line = {"_line_nr": line_nr + 2} + self.tracker.log_error( + {}, line, message=message["message"] + ) + else: + line = {"_line_nr": 0} + self.tracker.log_error({}, line, message=message["message"]) + except Exception as err: + line = {"_line_nr": 0} + self.tracker.log_error({}, line, message=err) + if self.must_break_on_error: + raise + + for arguments in tracker_data["created"].values(): + self.tracker.log_created(*arguments) + for arguments in tracker_data["updated"].values(): + self.tracker.log_updated(*arguments) + + # update report + self._do_report() + + # log chunk finished + msg = " ".join( + [ + "CHUNK FINISHED", + "[created: {created}]", + "[updated: {updated}]", + "[skipped: {skipped}]", + "[errored: {errored}]", + ] + ).format(**self.tracker.get_counters()) + self.tracker._log(msg) + + # TODO + # chunk_finished_event.fire( + # self.env, self.model._name, self.record) + return "ok" diff --git a/connector_importer/components/listeners.py b/connector_importer/components/listeners.py new file mode 100644 index 000000000..698d91a09 --- /dev/null +++ b/connector_importer/components/listeners.py @@ -0,0 +1,81 @@ +# Copyright 2023 Camptocamp SA +# @author: Simone Orsi +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
+from functools import partial + +from odoo.addons.component.core import Component + + +class ImportRecordsetEventListener(Component): + _name = "recordset.event.listener" + _inherit = "base.connector.listener" + _apply_on = ["import.recordset"] + + def on_last_record_import_finished(self, importer, record): + if self._must_run_server_action(importer, record, "last_importer_done"): + self._run_server_actions(importer, record) + + def on_record_import_finished(self, importer, record): + if self._must_run_server_action(importer, record, "each_importer_done"): + self._run_server_actions(importer, record) + + def _must_run_server_action(self, importer, record, trigger): + recordset = record.recordset_id + return bool( + recordset.server_action_ids + and recordset.server_action_trigger_on == trigger + and self._has_records_to_process(importer) + ) + + def _has_records_to_process(self, importer): + counters = importer.tracker.get_counters() + return counters["created"] or counters["updated"] + + def _run_server_actions(self, importer, record): + """Execute one or more server actions tied to the recordset.""" + recordset = record.recordset_id + actions = recordset.server_action_ids + report_by_model = recordset.get_report_by_model() + # execute actions by importer order + for model, report in report_by_model.items(): + action = actions.filtered(lambda x, model=model: x.model_id == model) + if not action: + continue + record_ids = sorted(set(report["created"] + report["updated"])) + if not record_ids: + continue + self._add_after_commit_hook(recordset.id, action.id, record_ids) + generic_action = actions.filtered( + lambda x: x.model_id.model == "import.recordset" + ) + if generic_action: + self._add_after_commit_hook(recordset.id, generic_action.id, recordset.ids) + + def _run_server_action(self, recordset_id, action_id, record_ids): + action = self.env["ir.actions.server"].browse(action_id) + action = action.with_context( + **self._run_server_action_ctx(recordset_id, action_id, record_ids) + ) + return action.run() + + def _run_server_action_ctx(self, recordset_id, action_id, record_ids): + action = self.env["ir.actions.server"].browse(action_id) + action_ctx = dict( + active_model=action.model_id.model, import_recordset_id=recordset_id + ) + if len(record_ids) > 1: + action_ctx["active_ids"] = record_ids + else: + action_ctx["active_id"] = record_ids[0] + return action_ctx + + def _add_after_commit_hook(self, recordset_id, action_id, record_ids): + self.env.cr.postcommit.add( + partial( + self._run_server_action_post_commit, recordset_id, action_id, record_ids + ), + ) + + def _run_server_action_post_commit(self, recordset_id, action_id, record_ids): + self._run_server_action(recordset_id, action_id, record_ids) + self.env.cr.commit() # pylint: disable=invalid-commit diff --git a/connector_importer/components/mapper.py b/connector_importer/components/mapper.py new file mode 100644 index 000000000..e2d9ac664 --- /dev/null +++ b/connector_importer/components/mapper.py @@ -0,0 +1,116 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
+
+
+from odoo import _, exceptions
+
+from odoo.addons.component.core import Component
+from odoo.addons.connector.components.mapper import mapping
+
+from ..log import logger
+
+
+class ImportMapper(Component):
+    _name = "importer.base.mapper"
+    _inherit = ["importer.base.component", "base.import.mapper"]
+    _usage = "importer.mapper"
+
+    required = {
+        # source key: dest key
+        # You can declare here the keys the importer must have
+        # to import a record.
+        # `source key` means a key in the source record,
+        # either a line in a csv file or a line from an sql table.
+        # `dest key` is the destination key for the source one.
+        # Eg: in your mapper you could have a mapping like
+        # direct = [
+        #     ('title', 'name'),
+        #     (concat(('title', 'foo', ), separator=' - '), 'baz'),
+        # ]
+        # You want the record to be skipped if:
+        # 1. title or name are not valued in the source
+        # 2. title is valued but the conversion gives an empty value for name
+        # 3. title or foo are not valued in the source
+        # 4. title and foo are valued but the conversion
+        #    gives an empty value for baz
+        # You can achieve this like:
+        # required = {
+        #     'title': ('name', 'baz'),
+        #     'foo': 'baz',
+        # }
+        # If you want to check only the source or the destination key,
+        # use the same name and prefix it w/ a double underscore, like:
+        # {'__foo': 'baz', 'foo': '__baz'}
+    }
+
+    def required_keys(self, create=False):
+        """Return required keys for this mapper.
+
+        The importer can use this to determine if a line
+        has to be skipped.
+
+        The recordset will use this to show required fields to users.
+        """
+        req = dict(self.required)
+        req.update(self.work.options.mapper.get("required_keys", {}))
+        return req
+
+    translatable = []
+
+    def translatable_keys(self, create=False):
+        """Return translatable keys for this mapper.
+
+        The importer can use this to translate specific fields
+        if they are found in the csv in the form `field_name:lang_code`.
+
+        The recordset will use this to show translatable fields to users.
+        """
+        translatable = list(self.translatable)
+        translatable += self.work.options.mapper.get("translatable_keys", [])
+        translatable = self._validate_translate_keys(set(translatable))
+        return tuple(translatable)
+
+    def _validate_translate_keys(self, translatable):
+        valid = []
+        fields_spec = self.model.fields_get(translatable)
+        for fname in translatable:
+            if not fields_spec.get(fname):
+                logger.error("%s - translate key not found: `%s`.", self._name, fname)
+                continue
+            if not fields_spec[fname]["translate"]:
+                logger.error("%s - `%s` key is not translatable.", self._name, fname)
+                continue
+            valid.append(fname)
+        return valid
+
+    defaults = [
+        # odoo field, value
+        # ('sale_ok', True),
+        # defaults can also be retrieved via xmlid from other records.
+        # The format is: `_xmlid::$record_xmlid:$record_field_value`
+        # where `$record_xmlid` is the xmlid of the record to retrieve
+        # and `$record_field_value` is the field to be used as value.
+        # Example:
+        #     ('company_id', '_xmlid::base.main_company:id'),
+    ]
+
+    @mapping
+    def default_values(self, record=None):
+        """Return default values for this mapper.
+
+        The recordset will use this to show default values to users.
+        """
+        values = {}
+        for k, v in self.defaults:
+            if isinstance(v, str) and v.startswith("_xmlid::"):
+                real_val = v.replace("_xmlid::", "").strip()
+                if not real_val or ":" not in real_val:
+                    raise exceptions.UserError(
+                        _("Malformed xml id ref: `%s`") % real_val
+                    )
+                xmlid, field_value = real_val.split(":")
+                v = self.env.ref(xmlid)[field_value]
+            values[k] = v
+        values.update(self.work.options.mapper.get("default_keys", {}))
+        return values
diff --git a/connector_importer/components/odoorecord.py b/connector_importer/components/odoorecord.py
new file mode 100644
index 000000000..22f55c36f
--- /dev/null
+++ b/connector_importer/components/odoorecord.py
@@ -0,0 +1,235 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+from odoo.tools import safe_eval
+
+from odoo.addons.component.core import Component
+
+from ..utils.misc import sanitize_external_id
+
+NO_VALUE = object()
+
+
+class OdooRecordHandler(Component):
+    """Interact w/ odoo importable records."""
+
+    _name = "importer.odoorecord.handler"
+    _inherit = "importer.base.component"
+    _usage = "odoorecord.handler"
+
+    # TODO: collect these from `work.options.record_handler`
+    unique_key = ""
+    importer = None
+    # By default odoo ignores create_uid/write_uid in vals.
+    # If you enable these flags and `create_uid` and/or `write_uid`
+    # are found in the values, they are going to be used for sudo.
+    # Same for `create_date`.
+    override_create_uid = False
+    override_create_date = False
+    override_write_uid = False
+    override_write_date = False
+
+    def _init_handler(self, importer=None, unique_key=None):
+        self.importer = importer
+        self.unique_key = unique_key
+
+    @property
+    def unique_key_is_xmlid(self):
+        return self.importer.unique_key_is_xmlid
+
+    def odoo_find_domain(self, values, orig_values):
+        """Domain to find the record in odoo."""
+        domain = self._odoo_find_domain_from_options(values, orig_values)
+        if not domain:
+            if not self.unique_key:
+                raise ValueError("No unique key and no domain to find this record")
+            domain = self._odoo_find_domain_from_unique_key(values, orig_values)
+        return domain
+
+    def _odoo_find_domain_from_options(self, values, orig_values):
+        """Evaluate the domain from the options, if any."""
+        match_domain = self.work.options.record_handler.match_domain
+        if not match_domain:
+            return []
+        eval_ctx = self._domain_from_options_eval_ctx(values, orig_values)
+        domain = safe_eval.safe_eval(
+            self.work.options.record_handler.match_domain, eval_ctx
+        )
+        if not isinstance(domain, list):
+            raise ValueError("match_domain must be a list")
+        return domain
+
+    def _domain_from_options_eval_ctx(self, values, orig_values):
+        return {
+            "env": self.env,
+            "user": self.env.user,
+            "datetime": safe_eval.datetime,
+            "dateutil": safe_eval.dateutil,
+            "time": safe_eval.time,
+            "values": values,
+            "orig_values": orig_values,
+            "ref_id": lambda x: self._smart_ref(x).id,
+            "ref": lambda x: self._smart_ref(x),
+        }
+
+    def _odoo_find_domain_from_unique_key(self, values, orig_values):
+        value = NO_VALUE
+        if self.unique_key in values:
+            value = values[self.unique_key]
+        elif self.unique_key in orig_values:
+            value = orig_values[self.unique_key]
+        if value is NO_VALUE:
+            raise ValueError(
+                f"Cannot find `{self.unique_key}` key in `values` nor `orig_values`"
+            )
+        return [(self.unique_key, "=", value)]
+
+    def odoo_find(self, values, orig_values):
+        """Find any existing item in odoo."""
+        if self.unique_key and self.unique_key_is_xmlid:
+            # if unique_key is None we
might use as special find domain + xid = self._get_xmlid(values, orig_values) + item = self.env.ref(xid, raise_if_not_found=False) + return item or self.model + item = self.model.search( + self.odoo_find_domain(values, orig_values), + order="create_date desc", + limit=1, + ) + return item + + def _smart_ref(self, xid): + return self.env.ref(sanitize_external_id(xid)) + + def _get_xmlid(self, values, orig_values): + # Mappers will remove `xid::` prefix from the final values + # hence, look for the original key. + return sanitize_external_id(orig_values.get(self.unique_key)) + + def odoo_exists(self, values, orig_values): + """Return true if the items exists.""" + return bool(self.odoo_find(values, orig_values)) + + def update_translations(self, odoo_record, translatable, ctx=None): + """Write translations on given record.""" + ctx = ctx or {} + for lang, values in translatable.items(): + odoo_record.with_context(lang=lang, **self.write_context()).write( + values.copy() + ) + + def odoo_pre_create(self, values, orig_values): + """Do some extra stuff before creating a missing record.""" + + def odoo_post_create(self, odoo_record, values, orig_values): + """Do some extra stuff after creating a missing record.""" + + def create_context(self): + """Inject context variables on create.""" + return dict( + self.importer._odoo_create_context(), + # mark each action w/ this flag + connector_importer_session=True, + ) + + @property + def must_generate_xmlid(self): + return self.work.options.record_handler.get( + "must_generate_xmlid", self.unique_key_is_xmlid + ) + + def odoo_create(self, values, orig_values): + """Create a new odoo record.""" + self.odoo_pre_create(values, orig_values) + # TODO: remove keys that are not model's fields + odoo_record = self.model.with_context(**self.create_context()).create( + values.copy() + ) + # force uid + if self.override_create_uid and values.get("create_uid"): + self._force_value(odoo_record, values, "create_uid") + # force create date + if self.override_create_date and values.get("create_date"): + self._force_value(odoo_record, values, "create_date") + self.odoo_post_create(odoo_record, values, orig_values) + translatable = self.importer.collect_translatable(values, orig_values) + self.update_translations(odoo_record, translatable) + # Set the external ID if necessary + if self.must_generate_xmlid: + xid = self._get_xmlid(values, orig_values) + if not self.env.ref(xid, raise_if_not_found=False): + module, id_ = xid.split(".", 1) + self.env["ir.model.data"].create( + { + "name": id_, + "module": module, + "model": odoo_record._name, + "res_id": odoo_record.id, + "noupdate": False, + } + ) + return odoo_record + + def odoo_pre_write(self, odoo_record, values, orig_values): + """Do some extra stuff before updating an existing object.""" + + def odoo_post_write(self, odoo_record, values, orig_values): + """Do some extra stuff after updating an existing object.""" + + def write_context(self): + """Inject context variables on write.""" + return dict( + self.importer._odoo_write_context(), + # mark each action w/ this flag + connector_importer_session=True, + ) + + def odoo_write(self, values, orig_values): + """Update an existing odoo record.""" + # pass context here to be applied always on retrieved record + odoo_record = self.odoo_find(values, orig_values).with_context( + **self.write_context() + ) + # copy values to not affect original values (mainly for introspection) + values_for_write = values.copy() + # purge unneeded values + 
self._odoo_write_purge_values(odoo_record, values_for_write) + # hook before write + self.odoo_pre_write(odoo_record, values_for_write, orig_values) + # do write now + odoo_record.write(values_for_write) + # force uid + if self.override_write_uid and values.get("write_uid"): + self._force_value(odoo_record, values, "write_uid") + # force write date + if self.override_write_date and values.get("write_date"): + self._force_value(odoo_record, values, "write_date") + # hook after write + self.odoo_post_write(odoo_record, values_for_write, orig_values) + # handle translations + translatable = self.importer.collect_translatable(values, orig_values) + self.update_translations(odoo_record, translatable) + return odoo_record + + def _force_value(self, record, values, fname): + # the query construction is not vulnerable to SQL injection, as we are + # replacing the table and column names here. + # pylint: disable=sql-injection + query = f"UPDATE {record._table} SET {fname} = %s WHERE id = %s" + self.env.cr.execute(query, (values[fname], record.id)) + record.invalidate_recordset([fname]) + + def _odoo_write_purge_values(self, odoo_record, values): + # remove non fields values + field_names = tuple(values.keys()) + for fname in field_names: + if fname not in self.model._fields: + values.pop(fname) + # remove fields having the same value + field_names = tuple(values.keys()) + if self.work.options.record_handler.skip_fields_unchanged: + current_values = odoo_record.read(field_names, load="_classic_write") + for k, v in current_values.items(): + if values[k] != v: + values.pop(k) diff --git a/connector_importer/components/odoorecord_csv_std.py b/connector_importer/components/odoorecord_csv_std.py new file mode 100644 index 000000000..268e203ab --- /dev/null +++ b/connector_importer/components/odoorecord_csv_std.py @@ -0,0 +1,38 @@ +# Copyright 2019 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl) + +from odoo.addons.component.core import Component + + +class OdooRecordHandlerCSVStd(Component): + """Interact w/ odoo importable records from standard Odoo CSV files.""" + + _name = "importer.odoorecord.handler.csv.std" + _inherit = "importer.odoorecord.handler" + _usage = "odoorecord.handler.csv" + xmlid_key = "id" # CSV field containing the record XML-ID + + def odoo_find(self, values, orig_values, use_xmlid=False): + """Find any existing item in odoo based on the XML-ID.""" + if use_xmlid: + if not self.xmlid_key: + return self.model + item = self.env.ref(values[self.xmlid_key], raise_if_not_found=False) + return item + return super().odoo_find(values, orig_values) + + def odoo_exists(self, values, orig_values, use_xmlid=False): + """Return true if the items exists.""" + return bool(self.odoo_find(values, orig_values, use_xmlid)) + + def odoo_create(self, values, orig_values): + """Create a new odoo record.""" + raise NotImplementedError( + "This method is not used when importing standard CSV files." + ) + + def odoo_write(self, values, orig_values): + """Create a new odoo record.""" + raise NotImplementedError( + "This method is not used when importing standard CSV files." + ) diff --git a/connector_importer/components/tracker.py b/connector_importer/components/tracker.py new file mode 100644 index 000000000..cdc559533 --- /dev/null +++ b/connector_importer/components/tracker.py @@ -0,0 +1,136 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
diff --git a/connector_importer/components/odoorecord_csv_std.py b/connector_importer/components/odoorecord_csv_std.py
new file mode 100644
index 000000000..268e203ab
--- /dev/null
+++ b/connector_importer/components/odoorecord_csv_std.py
@@ -0,0 +1,38 @@
+# Copyright 2019 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
+
+from odoo.addons.component.core import Component
+
+
+class OdooRecordHandlerCSVStd(Component):
+    """Interact w/ odoo importable records from standard Odoo CSV files."""
+
+    _name = "importer.odoorecord.handler.csv.std"
+    _inherit = "importer.odoorecord.handler"
+    _usage = "odoorecord.handler.csv"
+    xmlid_key = "id"  # CSV field containing the record XML-ID
+
+    def odoo_find(self, values, orig_values, use_xmlid=False):
+        """Find any existing item in odoo based on the XML-ID."""
+        if use_xmlid:
+            if not self.xmlid_key:
+                return self.model
+            item = self.env.ref(values[self.xmlid_key], raise_if_not_found=False)
+            return item
+        return super().odoo_find(values, orig_values)
+
+    def odoo_exists(self, values, orig_values, use_xmlid=False):
+        """Return True if the item exists."""
+        return bool(self.odoo_find(values, orig_values, use_xmlid))
+
+    def odoo_create(self, values, orig_values):
+        """Create a new odoo record."""
+        raise NotImplementedError(
+            "This method is not used when importing standard CSV files."
+        )
+
+    def odoo_write(self, values, orig_values):
+        """Update an existing odoo record."""
+        raise NotImplementedError(
+            "This method is not used when importing standard CSV files."
+        )
diff --git a/connector_importer/components/tracker.py b/connector_importer/components/tracker.py
new file mode 100644
index 000000000..cdc559533
--- /dev/null
+++ b/connector_importer/components/tracker.py
@@ -0,0 +1,136 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+import logging
+
+from odoo.addons.component.core import Component
+
+
+class ChunkReport(dict):
+    """A smarter dict for chunk reports."""
+
+    chunk_report_keys = ("created", "updated", "errored", "skipped")
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        for k in self.chunk_report_keys:
+            self[k] = []
+
+    def track_error(self, item):
+        self["errored"].append(item)
+
+    def track_skipped(self, item):
+        self["skipped"].append(item)
+
+    def track_updated(self, item):
+        self["updated"].append(item)
+
+    def track_created(self, item):
+        self["created"].append(item)
+
+    def counters(self):
+        res = {}
+        for k, v in self.items():
+            res[k] = len(v)
+        return res
+
+
+class Tracker(Component):
+    """Track what happens during importer jobs."""
+
+    _name = "importer.tracking.handler"
+    _inherit = "importer.base.component"
+    _usage = "tracking.handler"
+
+    model_name = ""
+    logger_name = ""
+    log_prefix = ""
+    _chunk_report_klass = ChunkReport
+
+    def _init_handler(self, model_name="", logger_name="", log_prefix=""):
+        self.model_name = model_name
+        self.logger_name = logger_name
+        self.log_prefix = log_prefix
+
+    _logger = None
+    _chunk_report = None
+
+    @property
+    def logger(self):
+        if not self._logger:
+            self._logger = logging.getLogger(self.logger_name)
+        return self._logger
+
+    @property
+    def chunk_report(self):
+        if not self._chunk_report:
+            self._chunk_report = self._chunk_report_klass()
+        return self._chunk_report
+
+    def chunk_report_item(self, line, odoo_record=None, message="", values=None):
+        return {
+            "line_nr": line["_line_nr"],
+            "message": message,
+            "model": self.model_name,
+            "odoo_record": odoo_record.id if odoo_record else None,
+        }
+
+    def _log(self, msg, line=None, level="info"):
+        handler = getattr(self.logger, level)
+        msg = "{prefix}{line}[model: {model}] {msg}".format(
+            prefix=self.log_prefix,
+            line="[line: {}]".format(line["_line_nr"]) if line else "",
+            model=self.model_name,
+            msg=msg,
+        )
+        handler(msg)
+
+    def log_updated(self, values, line, odoo_record=None, message=""):
+        if odoo_record:
+            self._log(f"UPDATED [id: {odoo_record.id}]", line=line)
+        self.chunk_report.track_updated(
+            self.chunk_report_item(
+                line, odoo_record=odoo_record, message=message, values=values
+            )
+        )
+
+    def log_error(self, values, line, odoo_record=None, message=""):
+        if isinstance(message, Exception):
+            message = str(message)
+        self._log(message, line=line, level="error")
+        self.chunk_report.track_error(
+            self.chunk_report_item(
+                line, odoo_record=odoo_record, message=message, values=values
+            )
+        )
+
+    def log_created(self, values, line, odoo_record=None, message=""):
+        if odoo_record:
+            self._log(f"CREATED [id: {odoo_record.id}]", line=line)
+        self.chunk_report.track_created(
+            self.chunk_report_item(
+                line, odoo_record=odoo_record, message=message, values=values
+            )
+        )
+
+    def log_skipped(self, values, line, skip_info):
+        # `skip_info` could contain a message
+        self._log("SKIPPED " + skip_info.get("message", ""), line=line, level="warning")
+
+        item = self.chunk_report_item(line, values=values)
+        item.update(skip_info)
+        self.chunk_report.track_skipped(item)
+
+    def get_report(self, previous=None):
+        previous = previous or {}
+        # init a new report
+        report = self._chunk_report_klass()
+        # merge previous and current
+        for k, _v in report.items():
+            prev = previous.get(self.model_name, {}).get(k, [])
+            report[k] = prev + self.chunk_report[k]
+        return report
+
+    def get_counters(self):
+        return self.chunk_report.counters()
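To see what the tracker accumulates per chunk, here is a minimal stand-in mirroring ``ChunkReport`` (``MiniChunkReport`` and the sample items are illustrative only, not part of the module)::

    class MiniChunkReport(dict):
        """Tiny stand-in for ChunkReport: one list of items per outcome."""

        def __init__(self):
            super().__init__((k, []) for k in ("created", "updated", "errored", "skipped"))

        def counters(self):
            # Same shape as ChunkReport.counters(): outcome -> item count
            return {k: len(v) for k, v in self.items()}

    report = MiniChunkReport()
    report["created"].append({"line_nr": 2, "model": "res.partner", "odoo_record": 10, "message": ""})
    report["skipped"].append({"line_nr": 3, "model": "res.partner", "message": "skip reason"})
    print(report.counters())
    # {'created': 1, 'updated': 0, 'errored': 0, 'skipped': 1}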
diff --git a/connector_importer/controllers/__init__.py b/connector_importer/controllers/__init__.py
new file mode 100644
index 000000000..12a7e529b
--- /dev/null
+++ b/connector_importer/controllers/__init__.py
@@ -0,0 +1 @@
+from . import main
diff --git a/connector_importer/controllers/main.py b/connector_importer/controllers/main.py
new file mode 100644
index 000000000..23be014af
--- /dev/null
+++ b/connector_importer/controllers/main.py
@@ -0,0 +1,24 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+from odoo import http
+from odoo.http import request
+
+from ..utils.report_html import Reporter
+
+
+class ReportController(http.Controller):
+    """Controller to display import reports."""
+
+    # TODO: refactor this to use qweb template only
+    @http.route(
+        "/importer/import-recordset/<model('import.recordset'):recordset>",
+        type="http",
+        auth="user",
+        website=False,
+    )
+    def full_report(self, recordset, **kwargs):
+        # e.g. /importer/import-recordset/42 renders the full report for
+        # recordset 42 (resolved to a record by the `model` converter).
+        reporter = Reporter(recordset.jsondata, detailed=1)
+        values = {"recordset": recordset, "report": reporter.html(wrapped=0)}
+        return request.render("connector_importer.recordset_report", values)
diff --git a/connector_importer/data/ir_cron.xml b/connector_importer/data/ir_cron.xml
new file mode 100644
index 000000000..bf8fd3091
--- /dev/null
+++ b/connector_importer/data/ir_cron.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<odoo noupdate="1">
+    <record id="ir_cron_import_cleanup_recorsets" model="ir.cron">
+        <field name="name">Importer backend: cleanup old recordsets</field>
+        <field name="model_id" ref="model_import_backend" />
+        <field name="state">code</field>
+        <field name="code">model.cron_cleanup_recordsets()</field>
+        <field name="interval_number">1</field>
+        <field name="interval_type">weeks</field>
+    </record>
+</odoo>
diff --git a/connector_importer/data/queue_job_function_data.xml b/connector_importer/data/queue_job_function_data.xml
new file mode 100644
index 000000000..48a7a140a
--- /dev/null
+++ b/connector_importer/data/queue_job_function_data.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<odoo>
+    <record id="channel_connector_importer" model="queue.job.channel">
+        <field name="name">connector_importer</field>
+        <field name="parent_id" ref="queue_job.channel_root" />
+    </record>
+    <record id="job_function_import_record" model="queue.job.function">
+        <field name="model_id" ref="model_import_record" />
+        <field name="method">import_record</field>
+        <field name="channel_id" ref="channel_connector_importer" />
+    </record>
+    <record id="job_function_import_recordset" model="queue.job.function">
+        <field name="model_id" ref="model_import_recordset" />
+        <field name="method">import_recordset</field>
+        <field name="channel_id" ref="channel_connector_importer" />
+    </record>
+</odoo>
diff --git a/connector_importer/events.py b/connector_importer/events.py
new file mode 100644
index 000000000..5ab96821c
--- /dev/null
+++ b/connector_importer/events.py
@@ -0,0 +1,28 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+from odoo.addons.connector.event import Event
+
+chunk_finished_event = Event()
+
+
+@chunk_finished_event
+def chunk_finished_subscriber(env, dest_model_name, last_record):
+    """Run `import_record_after_all` after last record has been imported."""
+    if not last_record.job_id:
+        # ok... we are not running in cron mode... my job here has finished!
+        return
+    # TODO
+    # backend = last_record.backend_id
+    # recordset = last_record.recordset_id
+    # other_records_completed = [
+    #     r.job_id.state == 'done'
+    #     for r in recordset.record_ids
+    #     if r != last_record
+    # ]
+    # if all(other_records_completed):
+    #     job_method = last_record.with_delay().import_record_after_all
+    #     if backend.debug_mode():
+    #         job_method = last_record.import_record_after_all
+    #     job_method(last_record_id=last_record.id)
diff --git a/connector_importer/i18n/connector_importer.pot b/connector_importer/i18n/connector_importer.pot
new file mode 100644
index 000000000..3bac6a07d
--- /dev/null
+++ b/connector_importer/i18n/connector_importer.pot
@@ -0,0 +1,892 @@
+# Translation of Odoo Server.
+# This file contains the translation of the following modules: +# * connector_importer +# +msgid "" +msgstr "" +"Project-Id-Version: Odoo Server 16.0\n" +"Report-Msgid-Bugs-To: \n" +"Last-Translator: \n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: \n" +"Plural-Forms: \n" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "" +"JOBS RUNNING\n" +" WARNING: one or more jobs are scheduled for a recorset or a record.\n" +" You will not be able to run the import again or to delete this backend\n" +" until you complete the jobs." +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_type__name +msgid "A meaningful human-friendly name" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Additional notes" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_type_form +msgid "Adv. options" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_type_form +msgid "Advanced" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_record__backend_id +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_search +msgid "Backend" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Base configuration" +msgstr "" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_reporter_mixin +msgid "Base mixin for reporters" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_encoding +msgid "CSV Encoding" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_delimiter +msgid "CSV delimiter" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_file +msgid "CSV file" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_filename +msgid "CSV filename" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_filesize +msgid "CSV filesize" +msgstr "" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_import_source_csv +msgid "CSV import source" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_path +msgid "CSV path" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_quotechar +msgid "CSV quotechar" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_rows_from_to +msgid "" +"CSV use only a slice of the available lines. Format: $from:$to. NOTE: " +"recommended only for debug/test purpose." +msgstr "" + +#. 
module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__cancelled +msgid "Cancelled" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source__chunk_size +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__chunk_size +msgid "Chunks Size" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source__config_summary +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__config_summary +msgid "Config Summary" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "Configuration" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_source_form +msgid "Configure source" +msgstr "" + +#. module: connector_importer +#: model:res.groups,name:connector_importer.group_importer_user +msgid "Connector importer user" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__create_date +msgid "Create Date" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__create_uid +#: model:ir.model.fields,field_description:connector_importer.field_import_record__create_uid +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__create_uid +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__create_uid +#: model:ir.model.fields,field_description:connector_importer.field_import_type__create_uid +msgid "Created by" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__create_date +#: model:ir.model.fields,field_description:connector_importer.field_import_record__create_date +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__create_date +#: model:ir.model.fields,field_description:connector_importer.field_import_type__create_date +msgid "Created on" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "Cron" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_cleanup_keep +msgid "Cron Cleanup Keep" +msgstr "" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_cron_mixin +msgid "Cron Mixin" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_cron_mixin__cron_mode +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_mode +msgid "Cron mode?" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__debug_mode +msgid "Debug mode?" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Defaults" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_type__description +msgid "Description" +msgstr "" + +#. 
module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Destination key" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__display_name +#: model:ir.model.fields,field_description:connector_importer.field_import_record__display_name +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__display_name +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__display_name +#: model:ir.model.fields,field_description:connector_importer.field_import_type__display_name +msgid "Display Name" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__docs_html +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Docs" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Docs for" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__done +msgid "Done" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__example_file_url +msgid "Download example file" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_recordset__override_existing +msgid "" +"Enable to update existing items w/ new values. If disabled, matching records" +" will be skipped." +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_backend__debug_mode +msgid "" +"Enabling debug mode causes the import to run in real time, without using any" +" job queue. Make sure you don't do this in production!" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__server_action_trigger_on__each_importer_done +msgid "End of each importer session" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__server_action_trigger_on__last_importer_done +msgid "End of the whole import" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__enqueued +msgid "Enqueued" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__example_file_ext_id +msgid "Example File Ext" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_recordset__server_action_ids +msgid "" +"Execute a server action when done. You can link a server action per model or" +" a single one for import.recordset. In that case you'll have to use low " +"level api to get the records that were processed. Eg: `get_report_by_model`." +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__server_action_ids +msgid "Executre server actions" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__failed +msgid "Failed" +msgstr "" + +#. 
module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Field" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_type__use_job +msgid "" +"For each importer used in the settings, one job will be spawned. Untick the " +"box if an importer depends on the result of a previous one (for instance to " +"link a record to the previously created one)." +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__full_report_url +msgid "Full Report Url" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Generate report" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_search +msgid "Group By" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__id +#: model:ir.model.fields,field_description:connector_importer.field_import_record__id +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__id +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__id +#: model:ir.model.fields,field_description:connector_importer.field_import_type__id +msgid "ID" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_backend__cron_master_recordset_id +msgid "" +"If an existing recordset is selected it will be used to create a new recordset each time the cron runs. \n" +"In this way you can keep every import session isolated. \n" +"If none, all recordsets will run." +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_backend__cron_cleanup_keep +msgid "" +"If this value is greater than 0 a cron will cleanup old recordsets and keep " +"only the latest N records matching this value." +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Import" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__backend_id +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "Import Backend" +msgstr "" + +#. module: connector_importer +#: model:ir.actions.act_window,name:connector_importer.action_import_backend +#: model:ir.ui.menu,name:connector_importer.menu_import_backend +msgid "Import Backends" +msgstr "" + +#. module: connector_importer +#: model:ir.actions.act_window,name:connector_importer.action_import_recordset +#: model:ir.ui.menu,name:connector_importer.menu_import_recordset +msgid "Import Recordsets" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "Import all" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_record__date +msgid "Import date" +msgstr "" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_import_record +msgid "Import record" +msgstr "" + +#. 
module: connector_importer +#: model:ir.model,name:connector_importer.model_import_recordset +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Import recordset" +msgstr "" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_import_source +msgid "Import source" +msgstr "" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_import_source_consumer_mixin +msgid "Import source consumer" +msgstr "" + +#. module: connector_importer +#: model:ir.actions.act_window,name:connector_importer.action_import_source +#: model:ir.ui.menu,name:connector_importer.menu_import_settings_sources +msgid "Import sources" +msgstr "" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_import_type +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__import_type_id +msgid "Import type" +msgstr "" + +#. module: connector_importer +#: model:ir.model.constraint,message:connector_importer.constraint_import_type_key_uniq +msgid "Import type `key` must be unique!" +msgstr "" + +#. module: connector_importer +#: model:ir.actions.act_window,name:connector_importer.action_import_type +#: model:ir.ui.menu,name:connector_importer.menu_import_settings_types +msgid "Import types" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__importable_model_ids +msgid "Importable Model" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Importable models:" +msgstr "" + +#. module: connector_importer +#: model:ir.module.category,name:connector_importer.module_category_connector_importer +#: model:ir.ui.menu,name:connector_importer.menu_importer_root +msgid "Importer" +msgstr "" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_import_backend +msgid "Importer Backend" +msgstr "" + +#. module: connector_importer +#: model:ir.actions.server,name:connector_importer.ir_cron_import_cleanup_recorsets_ir_actions_server +#: model:ir.cron,cron_name:connector_importer.ir_cron_import_cleanup_recorsets +msgid "Importer backend: cleanup old recordsets" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_cron_mixin__cron_interval_number +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_interval_number +msgid "Interval number" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_cron_mixin__cron_interval_type +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_interval_type +msgid "Interval type" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_record__job_id +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__job_id +#: model:ir.model.fields,field_description:connector_importer.field_job_related_mixin__job_id +msgid "Job" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__job_running +msgid "Job Running" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_search +msgid "Job State" +msgstr "" + +#. 
module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__jobs_global_state +msgid "Jobs Global State" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_record__jsondata_file +msgid "Jsondata File" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_type__key +msgid "Key" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend____last_update +#: model:ir.model.fields,field_description:connector_importer.field_import_record____last_update +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset____last_update +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv____last_update +#: model:ir.model.fields,field_description:connector_importer.field_import_type____last_update +msgid "Last Modified on" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__last_run_on +msgid "Last Run On" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__write_uid +#: model:ir.model.fields,field_description:connector_importer.field_import_record__write_uid +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__write_uid +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__write_uid +#: model:ir.model.fields,field_description:connector_importer.field_import_type__write_uid +msgid "Last Updated by" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__write_date +#: model:ir.model.fields,field_description:connector_importer.field_import_record__write_date +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__write_date +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__write_date +#: model:ir.model.fields,field_description:connector_importer.field_import_type__write_date +msgid "Last Updated on" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_report +msgid "Last start:" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Legend: TODO" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_type_form +msgid "Main" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "Make all jobs completed" +msgstr "" + +#. module: connector_importer +#. odoo-python +#: code:addons/connector_importer/components/mapper.py:0 +#, python-format +msgid "Malformated xml id ref: `%s`" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_master_recordset_id +msgid "Master recordset" +msgstr "" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_job_related_mixin +msgid "Mixin klass for queue.job relationship." +msgstr "" + +#. 
module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__name +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__name +#: model:ir.model.fields,field_description:connector_importer.field_import_source__name +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__name +#: model:ir.model.fields,field_description:connector_importer.field_import_type__name +msgid "Name" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__server_action_trigger_on__never +msgid "Never" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__no_job +msgid "No job" +msgstr "" + +#. module: connector_importer +#. odoo-python +#: code:addons/connector_importer/models/import_type.py:0 +#, python-format +msgid "No options found for: {}." +msgstr "" + +#. module: connector_importer +#. odoo-python +#: code:addons/connector_importer/components/importer.py:0 +#, python-format +msgid "No source configured on recordset '%s'" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__notes +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__notes +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "Notes" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Notes:" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_type__options +msgid "Options" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "Override existing" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__override_existing +msgid "Override existing items" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__pending +msgid "Pending" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Raw Status" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__recordset_ids +msgid "Record Sets" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__record_ids +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Records" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_record__recordset_id +msgid "Recordset" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "Recordsets" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_cron_mixin__cron_id +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_id +msgid "Related cron" +msgstr "" + +#. 
module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Report" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__report_data +msgid "Report Data" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__report_file +msgid "Report File" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__report_filename +msgid "Report Filename" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Report file" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_report +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_report_full +msgid "Report for" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__report_html +msgid "Report summary" +msgstr "" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_reporter_csv +msgid "Reporter producing a CSV report" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Required" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_search +msgid "Search recordset" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__sequence +msgid "Sequence" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_recordset__sequence +msgid "Sequence for the handle." +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__server_action_importable_model_ids +msgid "Server Action Importable Model" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__server_action_trigger_on +msgid "Server Action Trigger On" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Server actions" +msgstr "" + +#. module: connector_importer +#: model:ir.ui.menu,name:connector_importer.menu_import_settings_root +msgid "Settings" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__shared_data +msgid "Shared Data" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__source_ref_id +#: model:ir.model.fields,field_description:connector_importer.field_import_source_consumer_mixin__source_ref_id +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Source" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__source_config_summary +#: model:ir.model.fields,field_description:connector_importer.field_import_source_consumer_mixin__source_config_summary +msgid "Source Config Summary" +msgstr "" + +#. 
module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__source_id +#: model:ir.model.fields,field_description:connector_importer.field_import_source_consumer_mixin__source_id +msgid "Source ID" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Source key" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__source_model +#: model:ir.model.fields,field_description:connector_importer.field_import_source_consumer_mixin__source_model +msgid "Source type" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_cron_mixin__cron_start_date +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_start_date +msgid "Start date" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__started +msgid "Started" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_record__job_state +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__job_state +#: model:ir.model.fields,field_description:connector_importer.field_job_related_mixin__job_state +msgid "State" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_recordset__importable_model_ids +#: model:ir.model.fields,help:connector_importer.field_import_recordset__server_action_importable_model_ids +msgid "Technical field" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_backend__job_running +msgid "Tells you if a job is running for this backend." +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_recordset__jobs_global_state +msgid "" +"Tells you if a job is running for this recordset. If any of the sub jobs is " +"not DONE or FAILED we assume the global state is PENDING." +msgstr "" + +#. module: connector_importer +#. odoo-python +#: code:addons/connector_importer/utils/misc.py:0 +#, python-format +msgid "" +"The ID reference '%s' must contain maximum one dot (or 0). They are used to " +"refer to other modules ID, in the form: module.record_id" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "" +"This action will mark all jobs as done even if in progress. Are you sure?" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Translatable" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Trigger on" +msgstr "" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_search +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_tree +msgid "Type" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_type__key +msgid "Unique mnemonic identifier" +msgstr "" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_type__use_job +msgid "Use Job" +msgstr "" + +#. 
module: connector_importer
+#: model:ir.model.fields,help:connector_importer.field_import_recordset__notes
+msgid "Useful info for your users"
+msgstr ""
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs
+msgid "Value"
+msgstr ""
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_backend__version
+msgid "Version"
+msgstr ""
+
+#. module: connector_importer
+#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__wait_dependencies
+msgid "Wait Dependencies"
+msgstr ""
+
+#. module: connector_importer
+#: model:ir.model.fields,help:connector_importer.field_import_type__options
+msgid "YAML configuration"
+msgstr ""
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form
+msgid "You are about to run ALL configured recordsets. Are you sure?"
+msgstr ""
+
+#. module: connector_importer
+#: model:ir.model.fields,help:connector_importer.field_import_source_csv__example_file_ext_id
+msgid ""
+"You can define example file by creating attachments with an external ID matching the 'import.source.csv' record external ID:\n"
+"\t${import.source.csv.ExtID}_example_file\n"
+"\n"
+"You can also specify your own external ID by filling this field."
+msgstr ""
+
+#. module: connector_importer
+#. odoo-python
+#: code:addons/connector_importer/models/backend.py:0
+#: code:addons/connector_importer/models/job_mixin.py:0
+#, python-format
+msgid "You must complete the job first!"
+msgstr ""
diff --git a/connector_importer/i18n/it.po b/connector_importer/i18n/it.po
new file mode 100644
index 000000000..2ba4779e0
--- /dev/null
+++ b/connector_importer/i18n/it.po
@@ -0,0 +1,934 @@
+# Translation of Odoo Server.
+# This file contains the translation of the following modules:
+# * connector_importer
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: Odoo Server 16.0\n"
+"Report-Msgid-Bugs-To: \n"
+"PO-Revision-Date: 2024-08-29 11:06+0000\n"
+"Last-Translator: mymage \n"
+"Language-Team: none\n"
+"Language: it\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: \n"
+"Plural-Forms: nplurals=2; plural=n != 1;\n"
+"X-Generator: Weblate 5.6.2\n"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form
+msgid ""
+"JOBS RUNNING\n"
+"            WARNING: one or more jobs are scheduled for a recorset or a record.\n"
+"            You will not be able to run the import again or to delete this backend\n"
+"            until you complete the jobs."
+msgstr ""
+"LAVORI IN ESECUZIONE\n"
+"            ATTENZIONE: uno o più lavori sono schedulati per "
+"uno o più record.\n"
+"            Non si potrà rieseguire l'importazione o "
+"cancellare questo backend\n"
+"            fino al termine di questi lavori."
+
+#. module: connector_importer
+#: model:ir.model.fields,help:connector_importer.field_import_type__name
+msgid "A meaningful human-friendly name"
+msgstr "Un nome chiaro e comprensibile"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form
+msgid "Additional notes"
+msgstr "Note aggiuntive"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_type_form
+msgid "Adv. options"
+msgstr "Opzioni avanzate"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_type_form
+msgid "Advanced"
+msgstr "Avanzato"
+
+#.
module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_record__backend_id
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_search
+msgid "Backend"
+msgstr "Backend"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form
+msgid "Base configuration"
+msgstr "Configurazione base"
+
+#. module: connector_importer
+#: model:ir.model,name:connector_importer.model_reporter_mixin
+msgid "Base mixin for reporters"
+msgstr "Mixin base per i generatori di resoconti"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_encoding
+msgid "CSV Encoding"
+msgstr "Codifica CSV"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_delimiter
+msgid "CSV delimiter"
+msgstr "Delimitatore CSV"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_file
+msgid "CSV file"
+msgstr "File CSV"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_filename
+msgid "CSV filename"
+msgstr "Nome file CSV"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_filesize
+msgid "CSV filesize"
+msgstr "Dimensione file CSV"
+
+#. module: connector_importer
+#: model:ir.model,name:connector_importer.model_import_source_csv
+msgid "CSV import source"
+msgstr "Sorgente importazione CSV"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_path
+msgid "CSV path"
+msgstr "Percorso CSV"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_quotechar
+msgid "CSV quotechar"
+msgstr "Carattere virgolette CSV"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__csv_rows_from_to
+msgid ""
+"CSV use only a slice of the available lines. Format: $from:$to. NOTE: "
+"recommended only for debug/test purpose."
+msgstr ""
+"CSV usa solo una parte delle linee disponibili. Formato: $from:$to. NOTA: "
+"consigliato solo per scopi di debug/test."
+
+#. module: connector_importer
+#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__cancelled
+msgid "Cancelled"
+msgstr "Annullato"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_source__chunk_size
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__chunk_size
+msgid "Chunks Size"
+msgstr "Dimensione pezzo"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_source__config_summary
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__config_summary
+msgid "Config Summary"
+msgstr "Riepilogo configurazione"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form
+msgid "Configuration"
+msgstr "Configurazione"
+
+#.
module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_source_form +msgid "Configure source" +msgstr "Configurazione sorgente" + +#. module: connector_importer +#: model:res.groups,name:connector_importer.group_importer_user +msgid "Connector importer user" +msgstr "Utente importazione connettore" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__create_date +msgid "Create Date" +msgstr "Data creazione" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__create_uid +#: model:ir.model.fields,field_description:connector_importer.field_import_record__create_uid +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__create_uid +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__create_uid +#: model:ir.model.fields,field_description:connector_importer.field_import_type__create_uid +msgid "Created by" +msgstr "Creato da" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__create_date +#: model:ir.model.fields,field_description:connector_importer.field_import_record__create_date +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__create_date +#: model:ir.model.fields,field_description:connector_importer.field_import_type__create_date +msgid "Created on" +msgstr "Creato il" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "Cron" +msgstr "Cron" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_cleanup_keep +msgid "Cron Cleanup Keep" +msgstr "Mantenere pulizia cron" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_cron_mixin +msgid "Cron Mixin" +msgstr "Mixin cron" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_cron_mixin__cron_mode +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_mode +msgid "Cron mode?" +msgstr "Modalità cron?" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__debug_mode +msgid "Debug mode?" +msgstr "Modalità debug?" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Defaults" +msgstr "Predefiniti" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_type__description +msgid "Description" +msgstr "Descrizione" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Destination key" +msgstr "Chiave destinazione" + +#. 
module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__display_name +#: model:ir.model.fields,field_description:connector_importer.field_import_record__display_name +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__display_name +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__display_name +#: model:ir.model.fields,field_description:connector_importer.field_import_type__display_name +msgid "Display Name" +msgstr "Nome visualizzato" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__docs_html +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Docs" +msgstr "Documenti" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Docs for" +msgstr "Documenti per" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__done +msgid "Done" +msgstr "Eseguito" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__example_file_url +msgid "Download example file" +msgstr "Scarica file esempio" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_recordset__override_existing +msgid "" +"Enable to update existing items w/ new values. If disabled, matching records" +" will be skipped." +msgstr "" +"Abilitare per aggiornare gli elementi esistenti con nuovi valori. Se " +"disabilitato, i record corrispondenti verranno saltati." + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_backend__debug_mode +msgid "" +"Enabling debug mode causes the import to run in real time, without using any" +" job queue. Make sure you don't do this in production!" +msgstr "" +"Abilitare la modalità debug fa sì che l'importazione venga eseguita in tempo " +"reale, senza usare alcuna coda di lavoro. Assicurati di non farlo in " +"produzione!" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__server_action_trigger_on__each_importer_done +msgid "End of each importer session" +msgstr "Termine di ogni sessione dell'importatore" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__server_action_trigger_on__last_importer_done +msgid "End of the whole import" +msgstr "Termine dell'importazione" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__enqueued +msgid "Enqueued" +msgstr "In coda" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__example_file_ext_id +msgid "Example File Ext" +msgstr "Esempio file esterno" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_recordset__server_action_ids +msgid "" +"Execute a server action when done. You can link a server action per model or" +" a single one for import.recordset. In that case you'll have to use low " +"level api to get the records that were processed. Eg: `get_report_by_model`." +msgstr "" +"Eseguire un'azione server quando finito. Si può collegare un'azione server " +"per modello o una singola per import.recordset. 
In quel caso si dovrà usare " +"un'API di basso livello per ottenere i record che sono stati elaborati. Ad " +"esempio: `get_report_by_model`." + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__server_action_ids +msgid "Executre server actions" +msgstr "Esecuzione azioni server" + +#. module: connector_importer +#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__failed +msgid "Failed" +msgstr "Fallito" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Field" +msgstr "Campo" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_type__use_job +msgid "" +"For each importer used in the settings, one job will be spawned. Untick the " +"box if an importer depends on the result of a previous one (for instance to " +"link a record to the previously created one)." +msgstr "" +"Per ogni importatore utilizzato nelle impostazioni, verrà generato un " +"lavoro. Deselezionare la casella se un importatore dipende dal risultato di " +"uno precedente (ad esempio per collegare un record a quello creato in " +"precedenza)." + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__full_report_url +msgid "Full Report Url" +msgstr "URL completo resoconto" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Generate report" +msgstr "Genera resoconto" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_search +msgid "Group By" +msgstr "Raggruppa per" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__id +#: model:ir.model.fields,field_description:connector_importer.field_import_record__id +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__id +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__id +#: model:ir.model.fields,field_description:connector_importer.field_import_type__id +msgid "ID" +msgstr "ID" + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_backend__cron_master_recordset_id +msgid "" +"If an existing recordset is selected it will be used to create a new recordset each time the cron runs. \n" +"In this way you can keep every import session isolated. \n" +"If none, all recordsets will run." +msgstr "" +"Se viene selezionato un recordset esistente, verrà utilizzato per creare un " +"nuovo recordset ogni volta che viene eseguito il cron.\n" +"In questo modo si può mantenere isolata ogni sessione di importazione.\n" +"Se non ce n'è nessuno, verranno eseguiti tutti i recordset." + +#. module: connector_importer +#: model:ir.model.fields,help:connector_importer.field_import_backend__cron_cleanup_keep +msgid "" +"If this value is greater than 0 a cron will cleanup old recordsets and keep " +"only the latest N records matching this value." +msgstr "" +"Se questo valore è maggiore di 0, un cron pulirà i vecchi recordset e " +"conserverà solo gli ultimi N record che corrispondono a questo valore." + +#. 
module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Import" +msgstr "Importa" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__backend_id +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "Import Backend" +msgstr "Backend importazione" + +#. module: connector_importer +#: model:ir.actions.act_window,name:connector_importer.action_import_backend +#: model:ir.ui.menu,name:connector_importer.menu_import_backend +msgid "Import Backends" +msgstr "Backend importazione" + +#. module: connector_importer +#: model:ir.actions.act_window,name:connector_importer.action_import_recordset +#: model:ir.ui.menu,name:connector_importer.menu_import_recordset +msgid "Import Recordsets" +msgstr "Recordset importazione" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form +msgid "Import all" +msgstr "Importa tutto" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_record__date +msgid "Import date" +msgstr "Data importazione" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_import_record +msgid "Import record" +msgstr "Record importazione" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_import_recordset +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form +msgid "Import recordset" +msgstr "Recordset importazione" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_import_source +msgid "Import source" +msgstr "Sorgente importazione" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_import_source_consumer_mixin +msgid "Import source consumer" +msgstr "Origine consumatore importazione" + +#. module: connector_importer +#: model:ir.actions.act_window,name:connector_importer.action_import_source +#: model:ir.ui.menu,name:connector_importer.menu_import_settings_sources +msgid "Import sources" +msgstr "Sorgenti importazione" + +#. module: connector_importer +#: model:ir.model,name:connector_importer.model_import_type +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__import_type_id +msgid "Import type" +msgstr "Tipo importazione" + +#. module: connector_importer +#: model:ir.model.constraint,message:connector_importer.constraint_import_type_key_uniq +msgid "Import type `key` must be unique!" +msgstr "La `key` tipo importazione deve essere univoca!" + +#. module: connector_importer +#: model:ir.actions.act_window,name:connector_importer.action_import_type +#: model:ir.ui.menu,name:connector_importer.menu_import_settings_types +msgid "Import types" +msgstr "Tipi importazione" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__importable_model_ids +msgid "Importable Model" +msgstr "Modello importabile" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs +msgid "Importable models:" +msgstr "Modelli importabili:" + +#. module: connector_importer +#: model:ir.module.category,name:connector_importer.module_category_connector_importer +#: model:ir.ui.menu,name:connector_importer.menu_importer_root +msgid "Importer" +msgstr "Importatore" + +#. 
module: connector_importer +#: model:ir.model,name:connector_importer.model_import_backend +msgid "Importer Backend" +msgstr "Backend importatore" + +#. module: connector_importer +#: model:ir.actions.server,name:connector_importer.ir_cron_import_cleanup_recorsets_ir_actions_server +#: model:ir.cron,cron_name:connector_importer.ir_cron_import_cleanup_recorsets +msgid "Importer backend: cleanup old recordsets" +msgstr "Backend importatore: ripulisci vecchi recordset" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_cron_mixin__cron_interval_number +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_interval_number +msgid "Interval number" +msgstr "Numero intervallo" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_cron_mixin__cron_interval_type +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_interval_type +msgid "Interval type" +msgstr "Tipo intervallo" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_record__job_id +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__job_id +#: model:ir.model.fields,field_description:connector_importer.field_job_related_mixin__job_id +msgid "Job" +msgstr "Lavoro" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend__job_running +msgid "Job Running" +msgstr "Lavoro in esecuzione" + +#. module: connector_importer +#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_search +msgid "Job State" +msgstr "Stato lavoro" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__jobs_global_state +msgid "Jobs Global State" +msgstr "Stato globale lavori" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_record__jsondata_file +msgid "Jsondata File" +msgstr "File dati JSON" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_type__key +msgid "Key" +msgstr "Chiave" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_backend____last_update +#: model:ir.model.fields,field_description:connector_importer.field_import_record____last_update +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset____last_update +#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv____last_update +#: model:ir.model.fields,field_description:connector_importer.field_import_type____last_update +msgid "Last Modified on" +msgstr "Ultima modifica il" + +#. module: connector_importer +#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__last_run_on +msgid "Last Run On" +msgstr "Ultima esecuzione il" + +#. 
module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_backend__write_uid
+#: model:ir.model.fields,field_description:connector_importer.field_import_record__write_uid
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__write_uid
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__write_uid
+#: model:ir.model.fields,field_description:connector_importer.field_import_type__write_uid
+msgid "Last Updated by"
+msgstr "Ultimo aggiornamento di"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_backend__write_date
+#: model:ir.model.fields,field_description:connector_importer.field_import_record__write_date
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__write_date
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__write_date
+#: model:ir.model.fields,field_description:connector_importer.field_import_type__write_date
+msgid "Last Updated on"
+msgstr "Ultimo aggiornamento il"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_report
+msgid "Last start:"
+msgstr "Ultimo inizio:"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs
+msgid "Legend: TODO"
+msgstr "Legenda: DA FARE"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_type_form
+msgid "Main"
+msgstr "Principale"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form
+msgid "Make all jobs completed"
+msgstr "Imposta tutti i lavori come completati"
+
+#. module: connector_importer
+#. odoo-python
+#: code:addons/connector_importer/components/mapper.py:0
+#, python-format
+msgid "Malformated xml id ref: `%s`"
+msgstr "Riferimento ID XML malformato: `%s`"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_master_recordset_id
+msgid "Master recordset"
+msgstr "Recordset master"
+
+#. module: connector_importer
+#: model:ir.model,name:connector_importer.model_job_related_mixin
+msgid "Mixin klass for queue.job relationship."
+msgstr "Classe mixin per relazione queue.job."
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_backend__name
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__name
+#: model:ir.model.fields,field_description:connector_importer.field_import_source__name
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_csv__name
+#: model:ir.model.fields,field_description:connector_importer.field_import_type__name
+msgid "Name"
+msgstr "Nome"
+
+#. module: connector_importer
+#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__server_action_trigger_on__never
+msgid "Never"
+msgstr "Mai"
+
+#. module: connector_importer
+#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__no_job
+msgid "No job"
+msgstr "Nessun lavoro"
+
+#. module: connector_importer
+#. odoo-python
+#: code:addons/connector_importer/models/import_type.py:0
+#, python-format
+msgid "No options found for: {}."
+msgstr "Nessuna opzione trovata per: {}."
+
+#. module: connector_importer
+#.
odoo-python
+#: code:addons/connector_importer/components/importer.py:0
+#, python-format
+msgid "No source configured on recordset '%s'"
+msgstr "Nessuna sorgente configurata sul recordset '%s'"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_backend__notes
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__notes
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form
+msgid "Notes"
+msgstr "Note"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs
+msgid "Notes:"
+msgstr "Note:"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_type__options
+msgid "Options"
+msgstr "Opzioni"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form
+msgid "Override existing"
+msgstr "Sovrascrivi esistente"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__override_existing
+msgid "Override existing items"
+msgstr "Sovrascrivi elementi esistenti"
+
+#. module: connector_importer
+#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__pending
+msgid "Pending"
+msgstr "In sospeso"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form
+msgid "Raw Status"
+msgstr "Stato grezzo"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_backend__recordset_ids
+msgid "Record Sets"
+msgstr "Gruppi record"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__record_ids
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form
+msgid "Records"
+msgstr "Record"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_record__recordset_id
+msgid "Recordset"
+msgstr "Recordset"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form
+msgid "Recordsets"
+msgstr "Recordset"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_cron_mixin__cron_id
+#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_id
+msgid "Related cron"
+msgstr "Cron relativo"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form
+msgid "Report"
+msgstr "Resoconto"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__report_data
+msgid "Report Data"
+msgstr "Dati resoconto"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__report_file
+msgid "Report File"
+msgstr "File resoconto"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__report_filename
+msgid "Report Filename"
+msgstr "Nome file resoconto"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form
+msgid "Report file"
+msgstr "File resoconto"
+
+#.
module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_report
+#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_report_full
+msgid "Report for"
+msgstr "Resoconto per"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__report_html
+msgid "Report summary"
+msgstr "Riepilogo resoconto"
+
+#. module: connector_importer
+#: model:ir.model,name:connector_importer.model_reporter_csv
+msgid "Reporter producing a CSV report"
+msgstr "Generatore di resoconti che produce un resoconto CSV"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs
+msgid "Required"
+msgstr "Richiesto"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_search
+msgid "Search recordset"
+msgstr "Cerca recordset"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__sequence
+msgid "Sequence"
+msgstr "Sequenza"
+
+#. module: connector_importer
+#: model:ir.model.fields,help:connector_importer.field_import_recordset__sequence
+msgid "Sequence for the handle."
+msgstr "Sequenza per la gestione."
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__server_action_importable_model_ids
+msgid "Server Action Importable Model"
+msgstr "Modello importabile azione server"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__server_action_trigger_on
+msgid "Server Action Trigger On"
+msgstr "Attivazione azione server"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form
+msgid "Server actions"
+msgstr "Azioni server"
+
+#. module: connector_importer
+#: model:ir.ui.menu,name:connector_importer.menu_import_settings_root
+msgid "Settings"
+msgstr "Impostazioni"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__shared_data
+msgid "Shared Data"
+msgstr "Dati condivisi"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__source_ref_id
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_consumer_mixin__source_ref_id
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form
+msgid "Source"
+msgstr "Sorgente"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__source_config_summary
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_consumer_mixin__source_config_summary
+msgid "Source Config Summary"
+msgstr "Riepilogo configurazione sorgente"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__source_id
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_consumer_mixin__source_id
+msgid "Source ID"
+msgstr "ID sorgente"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs
+msgid "Source key"
+msgstr "Chiave sorgente"
+
+#.
module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__source_model
+#: model:ir.model.fields,field_description:connector_importer.field_import_source_consumer_mixin__source_model
+msgid "Source type"
+msgstr "Tipo sorgente"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_cron_mixin__cron_start_date
+#: model:ir.model.fields,field_description:connector_importer.field_import_backend__cron_start_date
+msgid "Start date"
+msgstr "Data inizio"
+
+#. module: connector_importer
+#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__started
+msgid "Started"
+msgstr "Iniziato"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_record__job_state
+#: model:ir.model.fields,field_description:connector_importer.field_import_recordset__job_state
+#: model:ir.model.fields,field_description:connector_importer.field_job_related_mixin__job_state
+msgid "State"
+msgstr "Stato"
+
+#. module: connector_importer
+#: model:ir.model.fields,help:connector_importer.field_import_recordset__importable_model_ids
+#: model:ir.model.fields,help:connector_importer.field_import_recordset__server_action_importable_model_ids
+msgid "Technical field"
+msgstr "Campo tecnico"
+
+#. module: connector_importer
+#: model:ir.model.fields,help:connector_importer.field_import_backend__job_running
+msgid "Tells you if a job is running for this backend."
+msgstr "Indica se è in esecuzione un lavoro per questo backend."
+
+#. module: connector_importer
+#: model:ir.model.fields,help:connector_importer.field_import_recordset__jobs_global_state
+msgid ""
+"Tells you if a job is running for this recordset. If any of the sub jobs is "
+"not DONE or FAILED we assume the global state is PENDING."
+msgstr ""
+"Indica se un lavoro è in esecuzione per questo recordset. Se uno dei sotto-"
+"lavori non è DONE o FAILED, si assume che lo stato globale sia PENDING."
+
+#. module: connector_importer
+#. odoo-python
+#: code:addons/connector_importer/utils/misc.py:0
+#, python-format
+msgid ""
+"The ID reference '%s' must contain maximum one dot (or 0). They are used to "
+"refer to other modules ID, in the form: module.record_id"
+msgstr ""
+"Il riferimento ID '%s' deve contenere al massimo un punto (o 0). Sono "
+"utilizzati per fare riferimento ad altri ID di moduli, nella forma: modulo"
+".record_id"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form
+msgid ""
+"This action will mark all jobs as done even if in progress. Are you sure?"
+msgstr ""
+"Questa azione imposterà tutti i lavori come eseguiti anche se in corso. Si "
+"è sicuri?"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs
+msgid "Translatable"
+msgstr "Traducibile"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_form
+msgid "Trigger on"
+msgstr "Attivazione"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_search
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_recordset_tree
+msgid "Type"
+msgstr "Tipo"
+
+#. module: connector_importer
+#: model:ir.model.fields,help:connector_importer.field_import_type__key
+msgid "Unique mnemonic identifier"
+msgstr "Identificatore mnemonico univoco"
+
+#.
module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_type__use_job
+msgid "Use Job"
+msgstr "Utilizza lavoro"
+
+#. module: connector_importer
+#: model:ir.model.fields,help:connector_importer.field_import_recordset__notes
+msgid "Useful info for your users"
+msgstr "Informazioni utili per gli utenti"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.recordset_docs
+msgid "Value"
+msgstr "Valore"
+
+#. module: connector_importer
+#: model:ir.model.fields,field_description:connector_importer.field_import_backend__version
+msgid "Version"
+msgstr "Versione"
+
+#. module: connector_importer
+#: model:ir.model.fields.selection,name:connector_importer.selection__import_recordset__jobs_global_state__wait_dependencies
+msgid "Wait Dependencies"
+msgstr "Attesa dipendenze"
+
+#. module: connector_importer
+#: model:ir.model.fields,help:connector_importer.field_import_type__options
+msgid "YAML configuration"
+msgstr "Configurazione YAML"
+
+#. module: connector_importer
+#: model_terms:ir.ui.view,arch_db:connector_importer.view_import_backend_form
+msgid "You are about to run ALL configured recordsets. Are you sure?"
+msgstr "Si stanno per eseguire TUTTI i recordset configurati. Si è sicuri?"
+
+#. module: connector_importer
+#: model:ir.model.fields,help:connector_importer.field_import_source_csv__example_file_ext_id
+msgid ""
+"You can define example file by creating attachments with an external ID matching the 'import.source.csv' record external ID:\n"
+"\t${import.source.csv.ExtID}_example_file\n"
+"\n"
+"You can also specify your own external ID by filling this field."
+msgstr ""
+"Si può definire un file di esempio creando allegati con un ID esterno "
+"corrispondente all'ID esterno del record 'import.source.csv':\n"
+"${import.source.csv.ExtID}_example_file\n"
+"\n"
+"Si può anche specificare il proprio ID esterno compilando questo campo."
+
+#. module: connector_importer
+#. odoo-python
+#: code:addons/connector_importer/models/backend.py:0
+#: code:addons/connector_importer/models/job_mixin.py:0
+#, python-format
+msgid "You must complete the job first!"
+msgstr "Bisogna prima completare il lavoro!"
diff --git a/connector_importer/log.py b/connector_importer/log.py
new file mode 100644
index 000000000..2357da627
--- /dev/null
+++ b/connector_importer/log.py
@@ -0,0 +1,26 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
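+#
+# Usage sketch (illustrative values, not part of the module's docs): start
+# Odoo with IMPORTER_LOG_PATH=/tmp/importer_logs to mirror importer messages
+# into /tmp/importer_logs/import.log through the rotating handler below.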
+
+import logging
+import os
+from logging.handlers import RotatingFileHandler
+
+LOGGER_NAME = "[importer]"
+logger = logging.getLogger(LOGGER_NAME)
+logger.setLevel(logging.INFO)
+
+base_path = os.getenv("IMPORTER_LOG_PATH")
+if base_path:
+    # use a separate log file when developing
+    FNAME = "import.log"
+    log_path = os.path.join(base_path, FNAME)
+
+    if not os.path.exists(base_path):
+        os.makedirs(base_path)
+
+    # add a rotating handler and log through the module logger,
+    # not the root logger
+    handler = RotatingFileHandler(log_path, maxBytes=1024 * 5, backupCount=5)
+    logger.addHandler(handler)
+    logger.info("logging to {}".format(log_path))
diff --git a/connector_importer/menuitems.xml b/connector_importer/menuitems.xml
new file mode 100644
index 000000000..020081e7b
--- /dev/null
+++ b/connector_importer/menuitems.xml
@@ -0,0 +1,39 @@
+
+
+
+
+
+
+
+
+
diff --git a/connector_importer/models/__init__.py b/connector_importer/models/__init__.py
new file mode 100644
index 000000000..cd22e0d7a
--- /dev/null
+++ b/connector_importer/models/__init__.py
@@ -0,0 +1,8 @@
+from . import cron_mixin
+from . import job_mixin
+from . import backend
+from . import import_type
+from . import sources
+from . import recordset
+from . import record
+from . import reporter
diff --git a/connector_importer/models/backend.py b/connector_importer/models/backend.py
new file mode 100644
index 000000000..39c21ed32
--- /dev/null
+++ b/connector_importer/models/backend.py
@@ -0,0 +1,145 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+import logging
+
+from odoo import _, api, exceptions, fields, models
+
+cleanup_logger = logging.getLogger("[recordset-cleanup]")
+
+BACKEND_VERSIONS = [("1.0", "Version 1.0")]
+
+
+class ImporterBackend(models.Model):
+    _name = "import.backend"
+    _description = "Importer Backend"
+    _inherit = ["connector.backend", "cron.mixin"]
+
+    @api.model
+    def _select_version(self):
+        """Available versions
+
+        Can be inherited to add custom versions.
+        """
+        return BACKEND_VERSIONS
+
+    name = fields.Char(required=True)
+    version = fields.Selection(selection="_select_version", required=True)
+    recordset_ids = fields.One2many(
+        "import.recordset", "backend_id", string="Record Sets"
+    )
+    # cron stuff
+    cron_master_recordset_id = fields.Many2one(
+        "import.recordset",
+        string="Master recordset",
+        help=(
+            "If an existing recordset is selected "
+            "it will be used to create a new recordset "
+            "each time the cron runs. "
+            "\nIn this way you can keep every import session isolated. "
+            "\nIf none, all recordsets will run."
+        ),
+    )
+    cron_cleanup_keep = fields.Integer(
+        help=(
+            "If this value is greater than 0 "
+            "a cron will cleanup old recordsets "
+            "and keep only the latest N records matching this value."
+        ),
+    )
+    notes = fields.Text()
+    debug_mode = fields.Boolean(
+        "Debug mode?",
+        help=(
+            "Enabling debug mode causes the import to run "
+            "in real time, without using any job queue. "
+            "Make sure you don't do this in production!"
+        ),
+    )
+    job_running = fields.Boolean(
+        compute="_compute_job_running",
+        help="Tells you if a job is running for this backend.",
+        readonly=True,
+    )
+
+    def unlink(self):
+        """Prevent delete if jobs are running."""
+        for item in self:
+            item._check_delete()
+        return super().unlink()
+
+    def _check_delete(self):
+        if not self.debug_mode and self.job_running:
+            raise exceptions.UserError(_("You must complete the job first!"))
+
+    def _compute_job_running(self):
+        for item in self:
+            running = False
+            for recordset in item.recordset_ids:
+                if recordset.has_job() and not recordset.job_done():
+                    running = True
+                    break
+                for record in recordset.record_ids:
+                    if record.has_job() and not record.job_done():
+                        running = True
+                        break
+            item.job_running = running
+
+    @api.model
+    def run_cron(self, backend_id):
+        # required by cron mixin
+        self.browse(backend_id).run_all()
+
+    def run_all(self):
+        """Run all recordset imports."""
+        self.ensure_one()
+        recordsets = self.recordset_ids
+        if self.cron_master_recordset_id:
+            # clone and use it to run
+            recordsets = self.cron_master_recordset_id.copy()
+        for recordset in recordsets:
+            if not recordset.source_ref_id:
+                # source not configured yet, useless to run
+                continue
+            recordset.run_import()
+
+    @api.model
+    def cron_cleanup_recordsets(self):
+        """Delete obsolete recordsets.
+
+        If you are running imports via cron and create one recordset
+        per run, you might end up w/ tons of old recordsets.
+
+        You can use `cron_cleanup_keep` to enable auto-cleanup.
+        Here we look up backends w/ this setting
+        and keep only the latest recordsets.
+        """
+        cleanup_logger.info("Looking for recordsets to clean up.")
+        backends = self.search([("cron_cleanup_keep", ">", 0)])
+        to_clean = self.env["import.recordset"]
+        for backend in backends:
+            if len(backend.recordset_ids) <= backend.cron_cleanup_keep:
+                continue
+            to_keep = backend.recordset_ids.sorted(
+                lambda x: x.create_date, reverse=True
+            )[: backend.cron_cleanup_keep]
+            # always keep this
+            to_keep |= backend.cron_master_recordset_id
+            # accumulate across backends (a plain `=` would drop everything
+            # but the last backend's recordsets)
+            to_clean |= backend.recordset_ids - to_keep
+        if to_clean:
+            msg = "Cleaning up {}".format(",".join(to_clean.mapped("name")))
+            cleanup_logger.info(msg)
+            to_clean.unlink()
+        else:
+            cleanup_logger.info("Nothing to do.")
+
+    def button_complete_jobs(self):
+        """Set all jobs to "completed" state."""
+        self.ensure_one()
+        for recordset in self.recordset_ids:
+            for record in recordset.record_ids:
+                if record.has_job() and not record.job_done():
+                    record.job_id.button_done()
+            if recordset.has_job() and not recordset.job_done():
+                recordset.job_id.button_done()
diff --git a/connector_importer/models/cron_mixin.py b/connector_importer/models/cron_mixin.py
new file mode 100644
index 000000000..1a98a558f
--- /dev/null
+++ b/connector_importer/models/cron_mixin.py
@@ -0,0 +1,85 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+from odoo import api, fields, models
+
+
+class CronMixin(models.AbstractModel):
+    """Add cron-related features to your models.
+
+    On inheriting models you can:
+
+    * enable cron mode
+    * configure a cron
+    * save and get a specific cron to run something on your model
+
+    You have to implement the method `run_cron`.
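+
+    A minimal sketch of an inheriting model (hypothetical names; the cron
+    generated by `get_cron_vals` executes `model.run_cron(<record id>)`,
+    so the override receives the record id):
+
+        class MyBackend(models.Model):
+            _name = "my.backend"
+            _inherit = "cron.mixin"
+
+            @api.model
+            def run_cron(self, backend_id):
+                self.browse(backend_id).do_the_thing()  # hypothetical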
+    """
+
+    _name = "cron.mixin"
+    _description = "Cron Mixin"
+
+    cron_mode = fields.Boolean("Cron mode?")
+    cron_start_date = fields.Datetime("Start date")
+    cron_interval_number = fields.Integer("Interval number")
+    cron_interval_type = fields.Selection(
+        selection="_select_interval_type", string="Interval type"
+    )
+    cron_id = fields.Many2one(
+        "ir.cron",
+        string="Related cron",
+        domain=lambda self: [
+            ("model_id", "=", self.env["ir.model"]._get_id(self._name))
+        ],
+    )
+
+    @api.model
+    def _select_interval_type(self):
+        return [
+            ("hours", "Hours"),
+            ("work_days", "Work Days"),
+            ("days", "Days"),
+            ("weeks", "Weeks"),
+            ("months", "Months"),
+        ]
+
+    @api.model
+    def get_cron_vals(self):
+        model_id = self.env["ir.model"]._get_id(self._name)
+        return {
+            "name": f"Cron for import backend {self.name}",
+            "model_id": model_id,
+            "state": "code",
+            "code": f"model.run_cron({self.id})",
+            "interval_number": self.cron_interval_number,
+            "interval_type": self.cron_interval_type,
+            "nextcall": self.cron_start_date,
+        }
+
+    def _update_or_create_cron(self):
+        """Update or create cron record if needed."""
+        if self.cron_mode:
+            cron_model = self.env["ir.cron"]
+            cron_vals = self.get_cron_vals()
+            if not self.cron_id:
+                self.cron_id = cron_model.create(cron_vals)
+            else:
+                self.cron_id.write(cron_vals)
+
+    @api.model_create_multi
+    def create(self, vals_list):
+        records = super().create(vals_list)
+        for rec in records:
+            rec._update_or_create_cron()
+        return records
+
+    def write(self, vals):
+        res = super().write(vals)
+        for rec in self:
+            rec._update_or_create_cron()
+        return res
+
+    @api.model
+    def run_cron(self):
+        raise NotImplementedError()
diff --git a/connector_importer/models/import_type.py b/connector_importer/models/import_type.py
new file mode 100644
index 000000000..77a7ba73f
--- /dev/null
+++ b/connector_importer/models/import_type.py
@@ -0,0 +1,136 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+import logging
+
+from odoo import _, api, exceptions, fields, models
+from odoo.tools import DotDict
+
+_logger = logging.getLogger(__name__)
+
+
+try:
+    import yaml
+except ImportError:
+    _logger.debug("`yaml` lib is missing")
+
+
+class ImportType(models.Model):
+    """Define an import.
+
+    An import type describes what a recordset should do.
+    You can describe an import using the `options` field with YAML format.
+    Here you can declare what you want to import (model) and how (importer).
+
+    Options example:
+
+        - model: product.template
+          importer: template.importer.component.name
+          context:
+            key1: foo
+          # will be ignored
+          description: a nice import
+          options:
+            mapper:
+              one: False
+            tracking_handler:
+              one: False
+
+        - model: product.product
+          importer: product.importer.component.name
+          context:
+            key1: foo
+          # will be ignored
+          description: a nice import
+          options:
+            importer:
+              break_on_error: True
+            mapper:
+              name: my.nice.mapper
+            record_handler:
+              one: False
+
+    The model is what you want to import, the importer states
+    the name of the connector component to handle the import for that model.
+
+    The importer machinery will run the imports for all the models declared
+    and will retrieve their specific importers to execute them.
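+
+    Consumer sketch (assuming a saved import type configured as above):
+
+        for cfg in import_type.available_importers():
+            # each cfg is a DotDict with keys `model`, `importer`,
+            # `options`, `context` and `is_last_importer`
+            run_importer(cfg)  # hypothetical consumer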
+ """ + + _name = "import.type" + _description = "Import type" + + name = fields.Char(required=True, help="A meaningful human-friendly name") + description = fields.Text() + key = fields.Char(required=True, help="Unique mnemonic identifier", copy=False) + options = fields.Text(help="YAML configuration") + use_job = fields.Boolean( + help=( + "For each importer used in the settings, one job will be spawned. " + "Untick the box if an importer depends on the result of a " + "previous one (for instance to link a record to the previously " + "created one)." + ), + default=True, + ) + _sql_constraints = [ + ("key_uniq", "unique (key)", "Import type `key` must be unique!") + ] + # TODO: provide default source and configuration policy + # for an import type to ease bootstrapping recordsets from UI. + # default_source_model_id = fields.Many2one() + + @api.constrains("options") + def _check_options(self): + no_options = self.browse() + for rec in self: + if not rec.options: + no_options += rec + # TODO: validate yaml schema (maybe w/ Cerberus?) + if no_options: + raise exceptions.UserError( + _("No options found for: {}.").format( + ", ".join(no_options.mapped("name")) + ) + ) + + def _load_options(self): + return yaml.safe_load(self.options or "") or [] + + def available_importers(self): + self.ensure_one() + options = self._load_options() + for line in options: + is_last_importer = False + if line == options[-1]: + is_last_importer = True + yield self._make_importer_info(line, is_last_importer=is_last_importer) + + def _make_importer_info(self, line, is_last_importer=True): + """Prepare importer information. + + :param line: dictionary representing a config line from `options` + :param is_last_importer: boolean to state if the line represents the last one + :return: odoo.tools.DotDict instance containing all importer options. + """ + res = DotDict(line, is_last_importer=is_last_importer) + for key in ("importer", "options", "context"): + if key not in res: + res[key] = self._importer_info_defaults.get(key, {}) + for k in ("importer", "mapper", "record_handler", "tracking_handler"): + if k not in res.options: + res["options"][k] = {} + return res + + _importer_info_defaults = { + "importer": { + "name": "importer.record", + }, + } + + def copy_data(self, default=None): + res = super().copy_data(default) + for data, rec in zip(res, self, strict=True): + data["key"] = rec.key + "_COPY_FIXME" + return res diff --git a/connector_importer/models/job_mixin.py b/connector_importer/models/job_mixin.py new file mode 100644 index 000000000..2ecc28d90 --- /dev/null +++ b/connector_importer/models/job_mixin.py @@ -0,0 +1,32 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
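+#
+# Usage sketch (hypothetical model): inherit the mixin to get a read-only
+# link to the queue.job processing the record, e.g.
+#
+#     class MyImportThing(models.Model):
+#         _name = "my.import.thing"
+#         _inherit = "job.related.mixin"
+#
+# so deletion is blocked automatically while the related job is unfinished.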
+
+from odoo import _, exceptions, fields, models
+
+from odoo.addons.queue_job.job import DONE
+
+
+class JobRelatedMixin(models.AbstractModel):
+    """Mixin klass for queue.job relationship."""
+
+    _name = "job.related.mixin"
+    _description = __doc__
+
+    job_id = fields.Many2one("queue.job", string="Job", readonly=True)
+    job_state = fields.Selection(index=True, related="job_id.state")
+
+    def has_job(self):
+        return bool(self.job_id)
+
+    def job_done(self):
+        return self.job_state == DONE
+
+    def _check_delete(self):
+        if self.has_job() and not self.job_done():
+            raise exceptions.UserError(_("You must complete the job first!"))
+
+    def unlink(self):
+        for item in self:
+            item._check_delete()
+        return super().unlink()
diff --git a/connector_importer/models/record.py b/connector_importer/models/record.py
new file mode 100644
index 000000000..9f53252d2
--- /dev/null
+++ b/connector_importer/models/record.py
@@ -0,0 +1,132 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+import base64
+import json
+import os
+
+from odoo import api, fields, models
+
+from ..log import logger
+from ..utils.misc import get_importer_for_config
+
+
+class ImportRecord(models.Model):
+    """Data to be imported.
+
+    An import record contains what you are actually importing.
+
+    Depending on backend settings you will have one or more source records
+    stored as JSON data in the `jsondata_file` field.
+
+    No matter where you are importing from (CSV, SQL, etc)
+    the importer machinery will:
+
+    * retrieve the models to import and their importer
+    * process all records and import them
+    * update recordset info
+
+    When the importer runs, it will read all the records,
+    convert them using connector mappers and do the import.
+    """
+
+    _name = "import.record"
+    _inherit = "job.related.mixin"
+    _description = "Import record"
+    _order = "id"
+    _backend_type = "import_backend"
+
+    date = fields.Datetime("Import date", default=fields.Datetime.now)
+    # This field holds the whole bare data to import from the external source
+    # hence it can be huge. For this reason we store it in an attachment.
+    jsondata_file = fields.Binary(attachment=True)
+    recordset_id = fields.Many2one("import.recordset", string="Recordset")
+    backend_id = fields.Many2one(
+        "import.backend",
+        string="Backend",
+        related="recordset_id.backend_id",
+        readonly=True,
+    )
+
+    @api.depends("date")
+    def _compute_name(self):
+        for item in self:
+            names = [item.date]
+            item.name = " / ".join([_f for _f in names if _f])
+
+    def set_data(self, adict):
+        self.ensure_one()
+        jsondata = json.dumps(adict)
+        self.jsondata_file = base64.b64encode(bytes(jsondata, "utf-8"))
+
+    def get_data(self):
+        self.ensure_one()
+        jsondata = None
+        if self.jsondata_file:
+            raw_data = base64.b64decode(self.jsondata_file).decode("utf-8")
+            jsondata = json.loads(raw_data)
+        return jsondata or {}
+
+    def debug_mode(self):
+        self.ensure_one()
+        return self.backend_id.debug_mode or os.environ.get("IMPORTER_DEBUG_MODE")
+
+    def _should_use_jobs(self):
+        self.ensure_one()
+        debug_mode = self.debug_mode()
+        if debug_mode:
+            logger.warning("### DEBUG MODE ACTIVE: WILL NOT USE QUEUE ###")
+        use_job = self.recordset_id.import_type_id.use_job
+        if debug_mode:
+            use_job = False
+        return use_job
+
+    def import_record(self, importer_config):
+        """This job will import a record.
+
+        :param importer_config: DotDict describing one importer line, as
+            yielded by `import.type.available_importers()` (keys: `model`,
+            `importer`, `options`, `context`, `is_last_importer`).
+        """
+        importer = get_importer_for_config(self.backend_id, self._name, importer_config)
+        return importer.run(
+            self, is_last_importer=importer_config.get("is_last_importer")
+        )
+
+    def run_import(self):
+        """Queue a job for importing data stored in `self`."""
+        self.ensure_one()
+        result = self._run_import(use_job=self._should_use_jobs())
+        return result
+
+    def _run_import(self, use_job=True):
+        res = {}
+        # spawn one job (or run inline) for each importer
+        # configured on the recordset
+        new_self = self.with_context(queue_job__no_delay=not use_job)
+        for config in self.recordset_id.available_importers():
+            if self.debug_mode() or not use_job:
+                result = new_self.import_record(config)
+                # debug mode, no job here: reset it!
+                self.write({"job_id": False})
+            else:
+                result = new_self.with_delay(
+                    **self._run_import_job_params(config)
+                ).import_record(config)
+                # FIXME: we should have a o2m here otherwise
+                # w/ multiple importers for the same record
+                # we keep the reference on w/ the last job.
+                self.write({"job_id": result.db_record().id})
+            res[config.model] = result
+
+        return res
+
+    def _run_import_job_params(self, config):
+        params = {
+            "description": (
+                f"recordset {self.recordset_id.name}: import {config['model']}"
+            )
+        }
+        return params
diff --git a/connector_importer/models/recordset.py b/connector_importer/models/recordset.py
new file mode 100644
index 000000000..5743d7725
--- /dev/null
+++ b/connector_importer/models/recordset.py
@@ -0,0 +1,409 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+import base64
+import json
+import os
+from collections import OrderedDict
+
+from odoo import api, fields, models
+
+from odoo.addons.component.utils import is_component_registry_ready
+from odoo.addons.queue_job.job import DONE, STATES
+
+from ..log import logger
+from ..utils.misc import get_importer_for_config, to_b64
+
+
+class ImportRecordset(models.Model):
+    """Set of records, together with their configuration.
+
+    A recordset can be considered as an "import session".
Here you declare:
+
+    * what you want to import (via "Import type")
+    * where you get records from (via "Source" configuration)
+
+    A recordset is also responsible for holding and displaying some
+    meaningful information about imports:
+
+    * required fields, translatable fields, defaults
+    * import stats (created|updated|skipped|errored counters, latest run)
+    * fully customizable HTML report to provide more details
+    * downloadable report file (via reporters)
+    * global states of running jobs
+
+    When you run the import of a recordset this is what it does:
+
+    * ask the source to provide all the records (chunked)
+    * create an import record for each chunk
+    * schedule the import job for each import record
+    """
+
+    _name = "import.recordset"
+    _inherit = [
+        "import.source.consumer.mixin",
+        "job.related.mixin",
+    ]
+    _description = "Import recordset"
+    _order = "sequence ASC, create_date DESC"
+    _backend_type = "import_backend"
+
+    backend_id = fields.Many2one("import.backend", string="Import Backend")
+    sequence = fields.Integer(help="Sequence for the handle.", default=10)
+    import_type_id = fields.Many2one(
+        string="Import type", comodel_name="import.type", required=True
+    )
+    override_existing = fields.Boolean(
+        string="Override existing items",
+        help="Enable to update existing items w/ new values. "
+        "If disabled, matching records will be skipped.",
+        default=True,
+    )
+    name = fields.Char(compute="_compute_name")
+    create_date = fields.Datetime()
+    record_ids = fields.One2many("import.record", "recordset_id", string="Records")
+    # store info about imports report
+    report_data = fields.Binary(attachment=True)
+    shared_data = fields.Binary(attachment=True)
+    report_html = fields.Html("Report summary", compute="_compute_report_html")
+    full_report_url = fields.Char(compute="_compute_full_report_url")
+    jobs_global_state = fields.Selection(
+        selection=[("no_job", "No job")] + STATES,
+        default="no_job",
+        compute="_compute_jobs_global_state",
+        help=(
+            "Tells you if a job is running for this recordset. "
+            "If any of the sub jobs is not DONE or FAILED "
+            "we assume the global state is PENDING."
+        ),
+    )
+    report_file = fields.Binary()
+    report_filename = fields.Char()
+    docs_html = fields.Html(string="Docs", compute="_compute_docs_html")
+    notes = fields.Html(help="Useful info for your users")
+    last_run_on = fields.Datetime()
+    server_action_trigger_on = fields.Selection(
+        selection=[
+            ("never", "Never"),
+            ("last_importer_done", "End of the whole import"),
+            ("each_importer_done", "End of each importer session"),
+        ],
+        default="never",
+    )
+    server_action_ids = fields.Many2many(
+        "ir.actions.server",
+        string="Execute server actions",
+        help=(
+            "Execute a server action when done. "
+            "You can link a server action per model "
+            "or a single one for import.recordset. "
+            "In that case you'll have to use low level api "
+            "to get the records that were processed. "
+            "Eg: `get_report_by_model`."
+ ), + ) + server_action_importable_model_ids = fields.Many2many( + comodel_name="ir.model", + compute="_compute_importable_model_ids", + relation="import_recordset_server_action_importable_model", + column1="recordset_id", + column2="model_id", + help="Technical field", + ) + importable_model_ids = fields.Many2many( + comodel_name="ir.model", + compute="_compute_importable_model_ids", + relation="import_recordset_importable_model", + column1="recordset_id", + column2="model_id", + help="Technical field", + ) + + def _compute_name(self): + for item in self: + item.name = f"#{item.id}" + + @api.depends("import_type_id.options") + def _compute_importable_model_ids(self): + _get = self.env["ir.model"]._get + for rec in self: + for config in rec.available_importers(): + rec.importable_model_ids |= _get(config.model) + rec.server_action_importable_model_ids = ( + _get(self._name) + rec.importable_model_ids + ) + + def get_records(self): + """Retrieve importable records and keep ordering.""" + return self.env["import.record"].search([("recordset_id", "=", self.id)]) + + def _set_serialized(self, fname, values, reset=False): + """Update serialized data.""" + _values = {} + if not reset: + _values = getattr(self, fname) or {} + if _values: + _values = self._get_json_from_binary(_values) + + _values.update(values) + json_report_data = json.dumps(_values) + _values = base64.b64encode(bytes(json_report_data, "utf-8")) + setattr(self, fname, _values) + # We need to invalidate the cache because the context dict + # bin_size=False triggers the _compute_datas(self) method + # which has the @api.depends_context('bin_size') decorator. + # Flush all pending computations and updates to the database. + domain = [ + ("res_model", "=", self._name), + ("res_field", "=", fname), + ("res_id", "in", self.ids), + ] + attachments = self.env["ir.attachment"].sudo().search(domain) + if attachments: + attachments.invalidate_recordset(("datas", "raw")) + + # Without invalidating cache we will have a bug because of Serialized + # field in odoo. It uses json.loads on convert_to_cache, which leads + # to all of our int dict keys converted to strings. Except for the + # first value get, where we get not from cache yet. + # SO if you plan on using integers as your dict keys for a serialized + # field beware that they will be converted to strings. 
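+        # For instance (hypothetical values): storing {1: "a"} and reading
+        # it back from the cache yields {"1": "a"}, because json.loads()
+        # can only produce string keys for JSON objects.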
+ # In order to streamline this I invalidate cache right away so the + # values are converted right away + # TL/DR integer dict keys will always be converted to strings, beware + self.invalidate_recordset((fname,)) + + def set_report(self, values, reset=False): + """Update import report values.""" + self.ensure_one() + self._set_serialized("report_data", values, reset=reset) + + def _get_json_from_binary(self, binary_data): + json_raw_data = {} + if binary_data: + json_raw_data = base64.b64decode(binary_data).decode("utf-8") + json_raw_data = json.loads(json_raw_data) + return json_raw_data + + def get_report(self): + self.ensure_one() + json_raw_data = self._get_json_from_binary( + self.with_context(bin_size=False).report_data + ) + return json_raw_data + + def set_shared(self, values, reset=False): + """Update import report values.""" + self.ensure_one() + self._set_serialized("shared_data", values, reset=reset) + + def get_shared(self): + self.ensure_one() + json_raw_data = self._get_json_from_binary( + self.with_context(bin_size=False).shared_data + ) + return json_raw_data + + def _prepare_for_import_session(self, start=True): + """Wipe all session related data.""" + report_data = {} + if start: + report_data["_last_start"] = fields.Datetime.to_string( + fields.Datetime.now() + ) + json_report_data = json.dumps(report_data) + values = { + "record_ids": [(5, 0, 0)], + "report_data": base64.b64encode(bytes(json_report_data, "utf-8")), + "shared_data": {}, + } + self.write(values) + self.invalidate_recordset(tuple(values.keys())) + + def _get_report_html_data(self): + """Prepare data for HTML report. + + :return dict: containing data for HTML report. + + Keys: + ``recordset``: current recordset + ``last_start``: last time import ran + ``report_by_model``: report data grouped by model. Like: + data['report_by_model'] = { + ir.model(res.parner): { + 'errored': 1, + 'skipped': 4, + 'created': 10, + 'updated': 8, + } + } + """ + report = self.get_report() + data = { + "recordset": self, + "last_start": report.pop("_last_start"), + "report_by_model": self._get_report_by_model(), + } + return data + + def _get_report_by_model(self, counters_only=True): + report = self.get_report() + value_handler = ( + len if counters_only else lambda vals: [x["odoo_record"] for x in vals] + ) + res = OrderedDict() + # count keys by model + for config in self.available_importers(): + model = self.env["ir.model"]._get(config.model) + res[model] = {} + # be defensive here. At some point + # we could decide to skip models on demand. 
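+            # report[config.model] maps a status key (e.g. "created",
+            # "skipped") to a list of report items; value_handler reduces
+            # each list to a counter or to its odoo_record references.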
+ for k, v in report.get(config.model, {}).items(): + res[model][k] = value_handler(v) + return res + + def get_report_by_model(self, model_name=None): + report = self._get_report_by_model(counters_only=False) + if model_name: + report = { + k.model: v for k, v in report.items() if k.model == model_name + }.get(model_name, {}) + return report + + @api.depends("report_data") + def _compute_report_html(self): + qweb = self.env["ir.qweb"].sudo() + for item in self: + item.report_html = False + if not item.report_data: + continue + data = item._get_report_html_data() + item.report_html = qweb._render("connector_importer.recordset_report", data) + + def _compute_full_report_url(self): + for item in self: + item.full_report_url = f"/importer/import-recordset/{item.id}" + + def debug_mode(self): + return self.backend_id.debug_mode or os.getenv("IMPORTER_DEBUG_MODE") + + @api.depends("job_id.state", "record_ids.job_id.state") + def _compute_jobs_global_state(self): + for item in self: + item.jobs_global_state = item._get_global_state() + + @api.model + def _get_global_state(self): + res = "no_job" + if not self.job_id or not self.record_ids: + return res + records_job_states = self.mapped("record_ids.job_id.state") + if all([x == DONE for x in records_job_states]): + res = DONE + else: + # pick the 1st one not done + not_done = [x for x in records_job_states if x != DONE] + res = not_done[0] if not_done else res + return res + + def available_importers(self): + return self.import_type_id.available_importers() if self.import_type_id else () + + def import_recordset(self): + """This job will import a recordset.""" + with self.backend_id.work_on(self._name) as work: + importer = work.component(usage="recordset.importer") + return importer.run(self) + + def run_import(self): + """queue a job for creating records (import.record items)""" + job_method = self.with_delay().import_recordset + if self.debug_mode(): + logger.warning("### DEBUG MODE ACTIVE: WILL NOT USE QUEUE ###") + job_method = self.import_recordset + + for item in self: + result = job_method() + if self.debug_mode(): + # debug mode, no job here: reset it! + item.write({"job_id": False}) + else: + # link the job + item.write({"job_id": result.db_record().id}) + self.last_run_on = fields.Datetime.now() + if self.debug_mode(): + # TODO: port this + # the "after_all" job needs to be fired manually when in debug mode + # since the event handler in .events.chunk_finished_subscriber + # cannot estimate when all the chunks have been processed. + # for model, importer in self.import_type_id.available_models(): + # import_record_after_all( + # session, + # self.backend_id.id, + # model, + # ) + pass + + def generate_report(self): + self.ensure_one() + reporter = self.get_source().get_reporter() + if reporter is None: + logger.debug("No reporter found...") + return + metadata, content = reporter.report_get(self) + self.write( + { + "report_file": to_b64(content.encode()), + "report_filename": metadata["complete_filename"], + } + ) + logger.info( + ("Report file updated on recordset={}. 
" "Filename: {}").format( + self.id, metadata["complete_filename"] + ) + ) + + def _get_importers(self): + importers = OrderedDict() + for importer_config in self.available_importers(): + model_record = self.env["ir.model"]._get(importer_config.model) + importers[model_record] = get_importer_for_config( + self.backend_id, self._name, importer_config + ) + return importers + + @api.depends("import_type_id") + def _compute_docs_html(self): + if not is_component_registry_ready(self.env.cr.dbname): + # We cannot render anything if we cannot load components + self.docs_html = False + return + qweb = self.env["ir.qweb"].sudo() + for item in self: + item.docs_html = False + if isinstance(item.id, models.NewId) or not item.backend_id: + # Surprise surprise: when editing a new recordset + # if you hit `configure source` btn + # the record will be saved but the backend can be null :S + continue + importers = item._get_importers() + data = {"recordset": item, "importers": importers} + item.docs_html = qweb._render("connector_importer.recordset_docs", data) + + +# TODO +# @job +# def import_record_after_all( +# session, backend_id, model_name, last_record_id=None, **kw): +# """This job will import a record.""" +# # TODO: check this +# model = 'import.record' +# env = get_environment(session, model, backend_id) +# # recordset = None +# # if last_record_id: +# # record = env[model].browse(last_record_id) +# # recordset = record.recordset_id +# importer = get_record_importer(env) +# return importer.after_all() diff --git a/connector_importer/models/reporter.py b/connector_importer/models/reporter.py new file mode 100644 index 000000000..c513fcdb0 --- /dev/null +++ b/connector_importer/models/reporter.py @@ -0,0 +1,258 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + + +import base64 +import csv +import io +import time + +from odoo import api, models + +from ..utils.import_utils import get_encoding + + +class ReporterMixin(models.AbstractModel): + """Base mixin for reporters. + + A reporter can be used to produce a file with a summary of the import + that the user can generate and download on each recordset. + + The summary can be anything you like: you are in total control of it. + See the CSV example for a real case. + """ + + _name = "reporter.mixin" + _description = "Base mixin for reporters" + + report_extension = ".txt" + + @api.model + def report_get(self, recordset, **options): + """Create and return a report for given recordset.""" + with io.StringIO() as fileout: + self.report_do(recordset, fileout, **options) + self.report_finalize(recordset, fileout, **options) + metadata = self.report_get_metadata(recordset, **options) + return metadata, fileout.getvalue() + + def report_do(self, recordset, fileout, **options): + """Override me to generate the report.""" + raise NotImplementedError() + + def report_finalize(self, recordset, fileout, **options): + """Apply late updates to report.""" + + def report_get_metadata(self, recordset, **options): + """Retrieve report file's metadata.""" + fname = str(time.time()) + ext = self.report_extension + return {"filename": fname, "ext": ext, "complete_filename": fname + ext} + + +class CSVReporter(models.AbstractModel): + """Produce a CSV report. + + Very often is not easy to let the customer know what went wrong. + Here a CSV report is generated based on the initial CSV + provided by the customer/user. 
+ + Basically we: + + * compute the stats of errored and skipped _get_lines + * clone the original CSV file + * add new columns at the end + + The new columns number is controlled by the flag `report_group_by_status`: + + * True: 2 new columns per each model imported. For instance: + * [R] res.partner skipped + * [R] res.partner errored + * [R] res.partner.category skipped + * [R] res.partner.category errored + * False: errors are grouped by state in 2 columns: + * [R] skipped + * [R] errored + + In this way the end user can check side by side which lines went wrong. + """ + + _name = "reporter.csv" + _description = "Reporter producing a CSV report" + _inherit = "reporter.mixin" + + report_extension = ".csv" + # columns to track/add + report_keys = ["skipped", "errored"] + # Flag to determine if status report must be grouped by status. + # If `True` report result will be merged by status (errored, skipped, ...) + report_group_by_status = True + + def report_get_writer(self, fileout, columns, delimiter=";", quotechar='"'): + writer = csv.DictWriter( + fileout, + columns, + delimiter=delimiter, + quoting=csv.QUOTE_NONNUMERIC, + quotechar=quotechar, + ) + writer.writeheader() + return writer + + def report_add_line(self, writer, item): + writer.writerow(item) + + def report_get_columns( + self, recordset, orig_content, extra_keys=None, delimiter=";", quotechar='"' + ): + """Retrieve columns by recordset. + + :param recordset: instance of recordset. + :param orig_content: original csv content list of line. + :param extra_keys: report-related extra columns. + """ + extra_keys = extra_keys or [] + # read only the 1st line of the original file + if orig_content: + reader = csv.reader( + orig_content[:1], delimiter=delimiter, quotechar=quotechar + ) + columns = next(reader) + return columns + extra_keys + return extra_keys + + def report_do(self, recordset, fileout, **options): + """Produce report.""" + json_report = recordset.get_report() + report_keys = options.get("report_keys", self.report_keys) + group_by_status = options.get("group_by_status", self.report_group_by_status) + + model_keys = [x for x in json_report.keys() if not x.startswith("_")] + + extra_keys = [self._report_make_key(x) for x in report_keys] + if not group_by_status: + # we produce one column per-model per-status + for model in model_keys: + for key in report_keys: + extra_keys.append(self._report_make_key(key, model=model)) + + source = recordset.get_source() + csv_file_bin = base64.b64decode(source.csv_file) + # Try to guess the encoding of the file supplied + csv_file_encoding = get_encoding(csv_file_bin).get("encoding", "utf-8") + orig_content = csv_file_bin.decode(csv_file_encoding).splitlines() + delimiter = source.csv_delimiter + quotechar = source.csv_quotechar + + columns = self.report_get_columns( + recordset, + orig_content, + extra_keys=extra_keys, + delimiter=delimiter, + quotechar=quotechar, + ) + + writer = self.report_get_writer( + fileout, columns, delimiter=delimiter, quotechar=quotechar + ) + + reader = csv.DictReader(orig_content, delimiter=delimiter, quotechar=quotechar) + + self._report_do( + json_report=json_report, + reader=reader, + writer=writer, + model_keys=model_keys, + report_keys=report_keys, + group_by_status=group_by_status, + ) + + def _report_do( + self, + json_report=None, + reader=None, + writer=None, + model_keys=None, + report_keys=None, + group_by_status=True, + ): + line_handler = self._report_line_by_model_and_status + if group_by_status: + line_handler = self._report_line_by_status 
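+        # Note: both handlers mutate `line` in place, filling the extra
+        # "[R] ..." columns right before each row is written back out.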
+
+        grouped = self._report_group_by_line(json_report, model_keys, report_keys)
+
+        for line in reader:
+            line_handler(line, reader.line_num, grouped, model_keys)
+            self.report_add_line(writer, line)
+
+    def _report_make_key(self, key, model=""):
+        if model:
+            return f"[R] {model}: {key}"
+        return f"[R] {key}"
+
+    def _report_group_by_line(self, json_report, model_keys, report_keys):
+        """Group report items by line number.
+
+        Return something like:
+
+        {
+            'errored': {},
+            'skipped': {
+                2: [
+                    {
+                        u'line_nr': 2,
+                        u'message': u'MISSING REQUIRED KEY=foo',
+                        u'model': u'product.supplierinfo',
+                        u'odoo_record': None
+                    },
+                    {
+                        u'line_nr': 2,
+                        u'message': u'MISSING REQUIRED KEY=bla',
+                        u'model': u'product.product',
+                        u'odoo_record': None
+                    },
+                ],
+                3: [
+                    {
+                        u'line_nr': 3,
+                        u'message': u'MISSING REQUIRED KEY=foo',
+                        u'model': u'product.template',
+                        u'odoo_record': None
+                    },
+                    {
+                        u'line_nr': 3,
+                        u'message': u'ALREADY_EXISTS code=XXXX',
+                        u'model': u'product.product',
+                        u'odoo_record': None
+                    },
+                ],
+            }
+        """
+        by_line = {}
+        for model in model_keys:
+            # list of messages
+            by_model = json_report.get(model, {})
+            for key in report_keys:
+                by_line.setdefault(key, {})
+                for item in by_model.get(key, []):
+                    by_line[key].setdefault(item["line_nr"], []).append(item)
+        return by_line
+
+    def _report_line_by_model_and_status(self, line, line_num, grouped, model_keys):
+        """Get one column for each model-status pair."""
+        for model in model_keys:
+            for status, by_line in grouped.items():
+                # collect this model's messages for the current line, if any
+                items = [x for x in by_line.get(line_num, []) if x["model"] == model]
+                # add the extra report column anyway, using the same key as
+                # the header built in `report_do` (`[R] {model}: {status}`)
+                line[self._report_make_key(status, model=model)] = "\n".join(
+                    [item["message"] for item in items]
+                )
+
+    def _report_line_by_status(self, line, line_num, grouped, model_keys):
+        """Get one column for each status, containing all models' messages."""
+        for status, by_line in grouped.items():
+            line_info = by_line.get(line_num, [])
+            line[self._report_make_key(status)] = "\n".join(
+                ["{model}: {message}".format(**item) for item in line_info]
+            )
diff --git a/connector_importer/models/sources/__init__.py b/connector_importer/models/sources/__init__.py
new file mode 100644
index 000000000..4d93ac79b
--- /dev/null
+++ b/connector_importer/models/sources/__init__.py
@@ -0,0 +1,3 @@
+from . import source_consumer_mixin
+from . import source_mixin
+from . import source_csv
diff --git a/connector_importer/models/sources/source_consumer_mixin.py b/connector_importer/models/sources/source_consumer_mixin.py
new file mode 100644
index 000000000..df240b492
--- /dev/null
+++ b/connector_importer/models/sources/source_consumer_mixin.py
@@ -0,0 +1,71 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+from odoo import api, fields, models
+
+
+class ImportSourceConsumerMixin(models.AbstractModel):
+    """Source consumer mixin.
+
+    Inheriting models can setup, configure and use import sources.
+
+    Relation towards source records is generic to grant maximum freedom
+    on which source type to use.
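+
+    For instance (hypothetical id), a consumer pointing at a CSV source
+    stores `source_model = "import.source.csv"` and `source_id = 42`,
+    and `source_ref_id` is then recomputed as `import.source.csv,42`.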
diff --git a/connector_importer/models/sources/__init__.py new file mode 100644 index 000000000..4d93ac79b --- /dev/null +++ b/connector_importer/models/sources/__init__.py @@ -0,0 +1,3 @@
+from . import source_consumer_mixin
+from . import source_mixin
+from . import source_csv
diff --git a/connector_importer/models/sources/source_consumer_mixin.py new file mode 100644 index 000000000..df240b492 --- /dev/null +++ b/connector_importer/models/sources/source_consumer_mixin.py @@ -0,0 +1,71 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+from odoo import api, fields, models
+
+
+class ImportSourceConsumerMixin(models.AbstractModel):
+    """Source consumer mixin.
+
+    Inheriting models can set up, configure and use import sources.
+
+    The relation towards source records is generic, to grant maximum freedom
+    on which source type to use.
+    """
+
+    _name = "import.source.consumer.mixin"
+    _description = "Import source consumer"
+
+    source_id = fields.Integer(string="Source ID", required=False)
+    source_model = fields.Selection(
+        string="Source type", selection="_selection_source_ref_id"
+    )
+    source_ref_id = fields.Reference(
+        string="Source",
+        compute="_compute_source_ref_id",
+        selection="_selection_source_ref_id",
+        # NOTE: do not store a computed fields.Reference, Odoo crashes
+        # with a "Mixing apples and oranges" error message when performing
+        # a self.recompute() on such fields.
+        store=False,
+    )
+    source_config_summary = fields.Html(
+        compute="_compute_source_config_summary", readonly=True
+    )
+
+    @api.depends("source_model", "source_id")
+    def _compute_source_ref_id(self):
+        for item in self:
+            item.source_ref_id = False
+            if not item.source_id or not item.source_model:
+                continue
+            item.source_ref_id = f"{item.source_model},{item.source_id}"
+
+    @api.model
+    def _selection_source_ref_id(self):
+        return [("import.source.csv", "CSV"), ("import.source.csv.std", "Odoo CSV")]
+
+    @api.depends("source_ref_id")
+    def _compute_source_config_summary(self):
+        for item in self:
+            item.source_config_summary = False
+            if not item.source_ref_id:
+                continue
+            item.source_config_summary = item.source_ref_id.config_summary
+
+    def open_source_config(self):
+        self.ensure_one()
+        action = self.env[self.source_model].get_formview_action()
+        action.update(
+            {
+                "views": [(self.env[self.source_model].get_config_view_id(), "form")],
+                "res_id": self.source_id,
+                "target": "new",
+            }
+        )
+        return action
+
+    def get_source(self):
+        """Return the source record for this consumer."""
+        return self.source_ref_id
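A minimal sketch of how a consumer uses this mixin, as the reporter above does via `recordset.get_source()` (`process_chunk` is a hypothetical callback):

    # `source_model` + `source_id` are resolved into the generic
    # `source_ref_id` reference, then lines are consumed in chunks.
    source = recordset.get_source()  # e.g. an import.source.csv record
    for chunk in source.get_lines():
        process_chunk(chunk)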
diff --git a/connector_importer/models/sources/source_csv.py new file mode 100644 index 000000000..e47728da5 --- /dev/null +++ b/connector_importer/models/sources/source_csv.py @@ -0,0 +1,116 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+import base64
+
+from odoo import api, fields, models
+
+from ...utils.import_utils import CSVReader, guess_csv_metadata
+
+
+class CSVSource(models.Model):
+    _name = "import.source.csv"
+    _inherit = "import.source"
+    _description = "CSV import source"
+    _source_type = "csv"
+    _reporter_model = "reporter.csv"
+
+    csv_file = fields.Binary("CSV file")
+    # use these to load the file from an FS path
+    csv_filename = fields.Char("CSV filename")
+    csv_filesize = fields.Char(
+        string="CSV filesize", compute="_compute_csv_filesize", readonly=True
+    )
+    # This is for scheduled import via FS path (FTP, sFTP, etc)
+    csv_path = fields.Char("CSV path")
+    csv_delimiter = fields.Char(string="CSV delimiter", default=";")
+    csv_quotechar = fields.Char(string="CSV quotechar", default='"')
+    csv_encoding = fields.Char(string="CSV Encoding")
+    csv_rows_from_to = fields.Char(
+        string="CSV rows slice",
+        help="Use only a slice of the available lines. "
+        "Format: $from:$to. "
+        "NOTE: recommended only for debug/test purposes.",
+    )
+    # Handy fields to get a downloadable example file
+    example_file_ext_id = fields.Char(
+        help=(
+            "You can define an example file by creating attachments "
+            "with an external ID matching the 'import.source.csv' record "
+            "external ID:\n"
+            "\t${import.source.csv.ExtID}_example_file\n\n"
+            "You can also specify your own external ID by filling this field."
+        )
+    )
+    example_file_url = fields.Char(
+        string="Download example file", compute="_compute_example_file_url"
+    )
+
+    _csv_reader_klass = CSVReader
+
+    @property
+    def _config_summary_fields(self):
+        _fields = super()._config_summary_fields
+        return _fields + [
+            "csv_filename",
+            "csv_filesize",
+            "csv_delimiter",
+            "csv_quotechar",
+            "csv_encoding",
+        ]
+
+    def _binary_csv_content(self):
+        return base64.b64decode(self.csv_file)
+
+    @api.onchange("csv_file")
+    def _onchange_csv_file(self):
+        if self.csv_file:
+            # auto-guess CSV details
+            meta = guess_csv_metadata(self._binary_csv_content())
+            if meta:
+                self.csv_delimiter = meta["delimiter"]
+                self.csv_quotechar = meta["quotechar"]
+
+    @api.depends("csv_file")
+    def _compute_csv_filesize(self):
+        for item in self:
+            item.csv_filesize = False
+            if item.csv_file:
+                # binary fields return the file size when read
+                # with `bin_size` in the context
+                item.csv_filesize = item.with_context(bin_size=True).csv_file
+
+    def _get_lines(self):
+        # read CSV
+        reader_args = {
+            "delimiter": self.csv_delimiter,
+            "encoding": self.csv_encoding,
+            "rows_from_to": self.csv_rows_from_to,
+        }
+        if self.csv_path:
+            # TODO: join w/ filename
+            reader_args["filepath"] = self.csv_path
+        elif self.csv_file:
+            reader_args["filedata"] = base64.decodebytes(self.csv_file)
+        else:
+            return iter([])
+
+        reader = self._csv_reader_klass(**reader_args)
+        return reader.read_lines()
+
+    def _get_example_attachment(self):
+        self.ensure_one()
+        xmlid = self.example_file_ext_id
+        if not xmlid:
+            source_xmlid = self.get_external_id()[self.id]
+            if not source_xmlid:
+                return
+            xmlid = f"{source_xmlid}_example_file"
+        return self.env.ref(xmlid, raise_if_not_found=False)
+
+    @api.depends("example_file_ext_id")
+    def _compute_example_file_url(self):
+        for source in self:
+            source.example_file_url = False
+            att = source._get_example_attachment()
+            if att:
+                source.example_file_url = f"/web/content/{att.id}/{att.name}"
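For instance, assuming a source record with external ID `my_module.source_csv_products` (all names here are hypothetical), an attachment like the following would be picked up by `_get_example_attachment` and exposed via `example_file_url`:

    <record id="source_csv_products_example_file" model="ir.attachment">
        <field name="name">products_example.csv</field>
        <field name="type">binary</field>
        <field name="datas" type="base64" file="my_module/data/products_example.csv"/>
    </record>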
diff --git a/connector_importer/models/sources/source_mixin.py new file mode 100644 index 000000000..d6bf8b94b --- /dev/null +++ b/connector_importer/models/sources/source_mixin.py @@ -0,0 +1,128 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+from odoo import api, fields, models
+
+from ...utils.import_utils import gen_chunks
+
+
+class ImportSource(models.AbstractModel):
+    """Define a source for an import.
+
+    A source model is responsible for:
+
+    * storing specific settings (chunk size, source params, etc)
+    * retrieving source lines (connect to an external service or db,
+      or read a CSV file)
+    * yielding lines in chunks
+    * displaying a configuration summary on the recordset (via `config_summary`)
+    * optionally, providing a reporter to create an extensive report for users.
+    """
+
+    _name = "import.source"
+    _description = "Import source"
+    _source_type = "none"
+    _reporter_model = ""
+
+    name = fields.Char(compute="_compute_name")
+    chunk_size = fields.Integer(required=True, default=500, string="Chunk Size")
+    config_summary = fields.Html(compute="_compute_config_summary")
+
+    # tmpl that renders the configuration summary
+    _config_summary_template = "connector_importer.source_config_summary"
+
+    def _compute_name(self):
+        for item in self:
+            item.name = item._source_type
+
+    @property
+    def _config_summary_fields(self):
+        """Fields automatically included in the summary.
+
+        Override it to add your custom fields automatically to the summary.
+        """
+        return ["chunk_size"]
+
+    @api.depends()
+    def _compute_config_summary(self):
+        """Generate the configuration summary HTML.
+
+        Configuration parameters can vary depending on the kind of source.
+        To display meaningful information on the recordset
+        w/out hacking the recordset view each time
+        we generate a short HTML summary.
+
+        For instance, if you are connecting to an external db
+        you might want to show the DSN, if you are loading a CSV
+        you might want to show delimiter, quotechar and so on.
+
+        To add your fields automatically to the summary,
+        just override `_config_summary_fields`.
+        They'll be automatically included in the summary.
+        """
+        tmpl_xid = self._config_summary_template
+        qweb = self.env["ir.qweb"].sudo()
+        for item in self:
+            item.config_summary = qweb._render(tmpl_xid, item._config_summary_data())
+
+    def _config_summary_data(self):
+        """Collect data for the summary."""
+        return {
+            "source": self,
+            "summary_fields": self._config_summary_fields,
+            "fields_info": self.fields_get(self._config_summary_fields),
+        }
+
+    @api.model_create_multi
+    def create(self, vals_list):
+        res = super().create(vals_list)
+        # Override to update the reference to the source on the consumer
+        for vals, record in zip(vals_list, res, strict=True):
+            active_model = vals.get(
+                "active_model", self.env.context.get("active_model")
+            )
+            active_id = vals.get("active_id", self.env.context.get("active_id"))
+
+            if active_model and active_id:
+                # update reference on consumer
+                self.env[active_model].browse(active_id).source_id = record.id
+        return res
+
+    def get_lines(self):
+        """Retrieve lines to import, in chunks."""
+        self.ensure_one()
+        # retrieve lines
+        lines = self._get_lines()
+
+        # sort them
+        lines_sorted = self._sort_lines(lines)
+
+        # no chunk size means no chunking of lines
+        if not self.chunk_size:
+            yield list(lines_sorted)
+            return
+        for chunk in gen_chunks(lines_sorted, chunksize=self.chunk_size):
+            # materialize each chunk out of the chunk iterator
+            yield list(chunk)
+
+    def _get_lines(self):
+        """Your duty here..."""
+        raise NotImplementedError()
+
+    def _sort_lines(self, lines):
+        """Override to customize sorting."""
+        return lines
+
+    def get_config_view_id(self):
+        """Retrieve the configuration view."""
+        return (
+            self.env["ir.ui.view"]
+            .search([("model", "=", self._name), ("type", "=", "form")], limit=1)
+            .id
+        )
+
+    def get_reporter(self):
+        """Retrieve a specific reporter for this source.
+
+        A reporter can be used to produce an extensive report for the end user.
+        See the `reporter` models.
+        """
+        return self.env.get(self._reporter_model)
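A minimal custom source sketch under this mixin's contract (hypothetical model name; it mirrors the `FakeSourceStatic` test model later in this diff): only `_get_lines()` is mandatory, chunking and the config summary come for free.

    from odoo import models


    class MyStaticSource(models.Model):
        _name = "import.source.my_static"  # hypothetical name
        _inherit = "import.source"
        _description = "My static source"
        _source_type = "my_static"

        def _get_lines(self):
            # yield one dict per line to import
            yield {"id": 1, "fullname": "Somebody"}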
diff --git a/connector_importer/pyproject.toml new file mode 100644 index 000000000..4231d0ccc --- /dev/null +++ b/connector_importer/pyproject.toml @@ -0,0 +1,3 @@
+[build-system]
+requires = ["whool"]
+build-backend = "whool.buildapi"
diff --git a/connector_importer/readme/CONFIGURE.md new file mode 100644 index 000000000..caf27c197 --- /dev/null +++ b/connector_importer/readme/CONFIGURE.md @@ -0,0 +1,231 @@
+## Import type
+
+Import types are the main configuration of the import. They describe
+which models you want to import and how to import them.
+
+Example of configuration:
+
+    <record id="import_type_product_product_all_in_one" model="import.type">
+        <field name="name">Import Product - all in one</field>
+        <field name="key">product_product_all_in_one</field>
+        <field name="options">
+
+    - model: product.product
+        options:
+            importer:
+                odoo_unique_key: barcode
+            mapper:
+                name: product.product.mapper
+
+    - model: res.partner
+        options:
+            importer:
+                odoo_unique_key: name
+            override_existing: false
+            mapper:
+                name: importer.mapper.dynamic
+                source_key_prefix: supplier.
+                source_key_whitelist: supplier.name
+                default_keys:
+                    supplier_rank: 1
+
+    - model: product.supplierinfo
+        options:
+            importer:
+                odoo_unique_key: name
+            mapper:
+                name: product.supplierinfo.mapper
+                source_key_prefix: supplier.
+
+        </field>
+    </record>
+
+In this example we have 3 models to import one after the other using the
+same source file:
+
+- product.product
+- res.partner
+- product.supplierinfo
+
+The import will run in the order of the configuration: first
+product.product, then res.partner and finally product.supplierinfo. For
+each model we have a configuration that describes how to import the
+data. With the `options` key we can define the configuration of the
+import for each component: `importer`, `mapper`, `record_handler`,
+`tracking_handler`.
+
+There are 4 main components in the import configuration:
+
+- importer
+- mapper
+- record_handler
+- tracking_handler
+
+Each of them is responsible for a specific part of the import.
+
+## The importer
+
+`importer` is the main component that will import the data. It will use
+the `mapper` to map the data from the source to the destination model.
+If no `name` is defined the importer will use the default importer for
+the model, which is capable of importing any model. Most of the time you
+don't need a specific importer.
+
+As the importer is the main component of the import, if you want to
+customize it you'll have to declare it at a higher level, next to the
+`options` key:
+
+    - model: product.product
+        importer:
+            name: product.product.importer
+        options:
+            mapper:
+                name: product.product.mapper
+
+The importer accepts the following options (see the sketch after this
+list for a combined example):
+
+- `odoo_unique_key`: the field that will be used to find the record in
+  Odoo. If the record is found it will be updated, otherwise it will be
+  created.
+
+  > NOTE: the value in the column declared as `odoo_unique_key` will be
+  > treated as an xid only if the name of the column is `id` or if it
+  > starts with `xid::`.
+
+- `break_on_error`: if set to True the import will stop if an error
+  occurs. Default is False.
+
+- `override_existing`: if set to True the existing records will be
+  updated. Default is True.
+
+- `translation_key_sep`: the separator used to split the translation
+  key. Default is `:`. See below for information about translation keys.
+
+- `translation_use_regional_lang`: if set to True the importer will use
+  the regional language, eg: fr_CH vs fr.
+
+- `ctx`: a dictionary of values to inject in the context of the import.
+
+- `write_only`: if set to True the importer will not create new records,
+  it will only update existing ones. Default is False.
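+For instance, a hypothetical import type entry combining several of
+these options (the model and column names here are illustrative):
+
+    - model: res.partner
+      options:
+          importer:
+              odoo_unique_key: xid::legacy_id
+              break_on_error: true
+              override_existing: false
+              ctx:
+                  tracking_disable: true
+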
+## The mapper
+
+The mapper is the component that will map the data from the source to
+the destination model.
+
+The most flexible mapper is `importer.mapper.dynamic`, which maps the
+data based on model introspection and some options that you can
+define. The dynamic mapper accepts the following options:
+
+- `name`: the name of the mapper to use. If no name is defined the
+  default mapper for the model will be used.
+- `source_key_prefix`: a prefix to add to the source key. This is useful
+  when you want to map the same source key to different destination
+  fields.
+- `source_key_whitelist`: a list of source keys to import. If not
+  defined all the keys will be imported.
+- `source_key_blacklist`: a list of source keys to exclude from the
+  import.
+- `source_key_rename`: a dictionary of source keys to rename. The key is
+  the source key and the value is the new key.
+- `default_keys`: a dictionary of default values to set on the
+  destination record. The key is the field name and the value is the
+  default value.
+- `translation_keys`: a list of keys that will be used to translate the
+  data. See below for information about translation keys.
+- `required_keys`: a list of keys that are required. If one of the keys
+  is missing the record will be skipped. Please refer to the
+  documentation of the mapper to see advanced options.
+
+Considering the example above:
+
+    - model: product.product
+        options:
+            mapper:
+                name: importer.mapper.dynamic
+                source_key_prefix: supplier.
+                source_key_whitelist: supplier.name
+                default_keys:
+                    supplier_rank: 1
+
+The mapper will:
+
+- import only keys starting with `supplier.`, ignoring the rest
+- import only the key `supplier.name`
+- set the default value of `supplier_rank` to 1
+
+## The record_handler
+
+The record handler is the component that handles the record creation
+or update in Odoo. This component is responsible for:
+
+- finding the record in Odoo
+- creating the record if not found
+- updating the record if found
+- handling the translations
+
+If no `name` is defined the importer will use the default record handler
+for the model, which is capable of handling any model. If you want to
+customize the record handler, declare its name under the `options` key:
+
+    - model: product.product
+        options:
+            record_handler:
+                name: product.product.record_handler
+
+To find the record in Odoo the record handler will use the
+`odoo_unique_key` if defined in the importer, otherwise it will fall
+back to the matching domain. See below.
+
+The record handler accepts the following options:
+
+- `name`: the name of the record handler to use. If no name is defined
+  the default record handler for the model will be used.
+
+- `match_domain`: a domain to match the record in Odoo. When no
+  `odoo_unique_key` is provided by the importer you must provide a
+  `match_domain`.
+
+  > This key accepts a snippet returning a domain. The snippet will be
+  > evaluated in the context of the import and will receive:
+  >
+  > - `orig_values`: the values from the source
+  > - `values`: values computed by the mapper for the record
+  > - `env`
+  > - `user`
+  > - `datetime`
+  > - `dateutil`
+  > - `time`
+  > - `ref_id`: a function to get a record ID from a reference
+  > - `ref`: a function to get a record from a reference
+  >
+  > Example:
+  >
+  >     match_domain: |
+  >         [('name', '=', values.get('name'))]
+
+- `must_generate_xmlid`: if set to True the importer will generate an
+  XML ID for the record. Default is True if the unique key is an xmlid.
+
+- `skip_fields_unchanged`: if set to True the importer will skip the
+  fields that are unchanged. Default is False.
+
+## Translations
+
+The importer can translate the data using the translation keys. The
+translation keys are a list of keys (columns) that will be handled as
+translatable. Whenever a key is found in the translation keys, the
+importer will look for a column with the same name suffixed by the
+language code (eg: name:fr_CH). If the column is found, the importer
+will translate the data using the language code as context.
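+
+For instance, assuming `name` is declared in the mapper's
+`translation_keys` and the default separator `:` is used, a CSV like
+this (illustrative content):
+
+    id,name,name:fr_CH
+    chair-1,Red chair,Chaise rouge
+
+would load `name` as-is and write "Chaise rouge" as the fr_CH
+translation of `name`.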
If the column is found the importer will +translate the data using the language code as context. diff --git a/connector_importer/readme/CONTRIBUTORS.md b/connector_importer/readme/CONTRIBUTORS.md new file mode 100644 index 000000000..f31aa5114 --- /dev/null +++ b/connector_importer/readme/CONTRIBUTORS.md @@ -0,0 +1,8 @@ +Simone Orsi (Camptocamp) for the original implementation. + +Other contributors include: + +- Guewen Baconnier (Camptocamp) +- Mykhailo Panarin (Camptocamp) +- Sébastien Alix (Camptocamp) +- Thien Vo (Trobz) diff --git a/connector_importer/readme/CREDITS.md b/connector_importer/readme/CREDITS.md new file mode 100644 index 000000000..57e03a9fe --- /dev/null +++ b/connector_importer/readme/CREDITS.md @@ -0,0 +1 @@ +The migration of this module from 16.0 to 18.0 was financially supported by Camptocamp. diff --git a/connector_importer/readme/DESCRIPTION.md b/connector_importer/readme/DESCRIPTION.md new file mode 100644 index 000000000..a27290ed5 --- /dev/null +++ b/connector_importer/readme/DESCRIPTION.md @@ -0,0 +1,9 @@ +This module allows to import / update records from files using the connector +framework and job queue. + +To run an import you need at least: + +* a backend, hosts the global configuration of the import. +* a recordset, hosts the configuration of the import for specific models and source +* a source, provides the data to import +* an import type, describes which models you want to import and how to import them diff --git a/connector_importer/readme/ROADMAP.md b/connector_importer/readme/ROADMAP.md new file mode 100644 index 000000000..bd77fba79 --- /dev/null +++ b/connector_importer/readme/ROADMAP.md @@ -0,0 +1,15 @@ +- with the import of standard Odoo CSV files, a concurrency error occurs + when updating the report_data of import_recordset table (from the + importer: self.\_do_report() -\> self.recordset.set_report(...)). The + job is automatically retried a second time (without concurrency + errors). For small files it's not a big issue, but for files with a + huge amount of lines it takes time to process them two times. 
+- move generic functions from utils.mapper_utils to the connector module
+- unit tests for record handler and tracker
+- add more test coverage for mapper utils and the dynamic mapper
+- consider making the dynamic mapper the default one
+- control how to generate the xid (eg: from a specific field with a key
+  must_generate_xmlid_from_key)
+- add manual control for backend_to_rel mappers
+- refactor the source to be a specific m2o, to ease management, instead
+  of a generic relation
diff --git a/connector_importer/security/ir.model.access.csv new file mode 100644 index 000000000..95e38f385 --- /dev/null +++ b/connector_importer/security/ir.model.access.csv @@ -0,0 +1,11 @@
+id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
+access_import_backend,connector_importer.access_import_backend,model_import_backend,connector.group_connector_manager,1,1,1,1
+access_import_recordset,connector_importer.access_import_recordset,model_import_recordset,connector.group_connector_manager,1,1,1,1
+access_import_record,connector_importer.access_import_record,model_import_record,connector.group_connector_manager,1,1,1,1
+access_import_type,connector_importer.access_import_type,model_import_type,connector.group_connector_manager,1,1,1,1
+access_import_source_csv,connector_importer.access_import_source_csv,model_import_source_csv,connector.group_connector_manager,1,1,1,1
+access_import_backend_user,connector_importer.access_import_backend_user,model_import_backend,connector_importer.group_importer_user,1,0,0,0
+access_import_recordset_user,connector_importer.access_import_recordset_user,model_import_recordset,connector_importer.group_importer_user,1,0,0,0
+access_import_type_user,connector_importer.access_import_type_user,model_import_type,connector_importer.group_importer_user,1,0,0,0
+access_import_source_csv_user,connector_importer.access_import_source_csv_user,model_import_source_csv,connector_importer.group_importer_user,1,0,0,0
+access_connector_queue_job_user,connector job user,connector.model_queue_job,connector_importer.group_importer_user,1,0,0,0
diff --git a/connector_importer/security/security.xml new file mode 100644 index 000000000..dec8ff681 --- /dev/null +++ b/connector_importer/security/security.xml @@ -0,0 +1,16 @@
[XML markup lost in extraction; the file defines an "Importer" module category (sequence 20) and a "Connector importer user" group (`group_importer_user`, referenced by the access rules above).]
diff --git a/connector_importer/static/description/icon.png new file mode 100644 index 000000000..4d8db6ec7 Binary files /dev/null and b/connector_importer/static/description/icon.png differ
diff --git a/connector_importer/static/description/icon.svg new file mode 100644 index 000000000..02c4d2c6c --- /dev/null +++ b/connector_importer/static/description/icon.svg @@ -0,0 +1 @@
[SVG markup lost in extraction.]
diff --git a/connector_importer/static/description/index.html new file mode 100644 index 000000000..abc903932 --- /dev/null +++ b/connector_importer/static/description/index.html @@ -0,0 +1,719 @@
[719 lines of HTML generated by oca-gen-addon-readme omitted: it renders the same content as README.rst and the readme/*.md fragments above.]
diff --git a/connector_importer/tests/__init__.py new file mode 100644 index 000000000..4af0ae040 --- /dev/null +++ b/connector_importer/tests/__init__.py @@ -0,0 +1,12 @@
+from . import test_backend
+from . import test_cron
+from . import test_import_type
+from . import test_recordset
+from . import test_record_importer
+from . import test_record_importer_basic
+from . import test_record_importer_xmlid
+from . import test_record_handler
+from . import test_source
+from . import test_source_csv
+from . import test_mapper
+from . import test_event_listeners
diff --git a/connector_importer/tests/common.py new file mode 100644 index 000000000..d260818b1 --- /dev/null +++ b/connector_importer/tests/common.py @@ -0,0 +1,118 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+import logging
+
+import odoo.tests.common as common
+from odoo.tools.misc import file_path
+
+from odoo.addons.component.tests.common import TransactionComponentRegistryCase
+
+from ..utils.import_utils import gen_chunks
+
+# TODO: really annoying when running tests. Remove or find a better way
+logging.getLogger("PIL.PngImagePlugin").setLevel(logging.ERROR)
+logging.getLogger("passlib.registry").setLevel(logging.ERROR)
+
+
+def _load_filecontent(module, filepath, mode="r"):
+    path = file_path(module + "/" + filepath)
+    with open(path, mode) as fd:
+        return fd.read()
+
+
+class BaseTestCase(common.TransactionCase):
+    @staticmethod
+    def load_filecontent(*args, **kwargs):
+        return _load_filecontent(*args, **kwargs)
+
+
+class MockedSource:
+    """A fake source for recordsets."""
+
+    lines = []
+    chunks_size = 5
+
+    def __init__(self, lines, chunk_size=5):
+        self.lines = lines
+        self.chunks_size = chunk_size
+
+    def get_lines(self):
+        return gen_chunks(self.lines, self.chunks_size)
+
+
+def fake_lines(count, keys):
+    """Generate importable fake lines."""
+    res = []
+    _item = dict.fromkeys(keys, "")
+    for i in range(1, count + 1):
+        item = _item.copy()
+        for k in keys:
+            item[k] = f"{k}_{i}"
+        item["_line_nr"] = i
+        res.append(item)
+    return res
+
+
+class TestImporterMixin:
+    def _setup_components(self):
+        for mod in self._get_component_modules():
+            self._load_module_components(mod)
+        self._build_components(*self._get_components())
+
+    def _get_component_modules(self):
+        return ["connector_importer"]
+
+    def _get_components(self):
+        return []
+
+    @classmethod
+    def _setup_records(cls):
+        cls.backend = cls.env["import.backend"].create(
+            # no jobs thanks (I know, we should test this too at some point :))
+            {"name": "Foo", "version": "1.0", "debug_mode": True}
+        )
+        cls.import_type = cls.env["import.type"].create(
+            {
+                "name": "Fake",
+                "key": "fake",
+                "options": """
+- model: res.partner
+  importer:
+    name: fake.partner.importer
+""",
+            }
+        )
+        cls.recordset = cls.env["import.recordset"].create(
+            {"backend_id": cls.backend.id, "import_type_id": cls.import_type.id}
+        )
+
+    def _patch_get_source(self, lines, chunk_size=5):
+        self.env["import.recordset"]._patch_method(
+            "get_source", lambda x: MockedSource(lines, chunk_size=chunk_size)
+        )
+
+    def _fake_lines(self, count, keys=None):
+        return fake_lines(count, keys=keys or [])
+
+    @staticmethod
+    def load_filecontent(*args, **kwargs):
+        return _load_filecontent(*args, **kwargs)
+
+
+class TestImporterBase(TransactionComponentRegistryCase, TestImporterMixin):
+    @classmethod
+    def setUpClass(cls):
+        super().setUpClass()
cls._setup_registry(cls) + cls._setup_records() + + def setUp(self): + super().setUp() + self._setup_components() + + @classmethod + def tearDownClass(cls): + cls._teardown_registry(cls) + return super().tearDownClass() diff --git a/connector_importer/tests/fake_components.py b/connector_importer/tests/fake_components.py new file mode 100644 index 000000000..dbb6ac14b --- /dev/null +++ b/connector_importer/tests/fake_components.py @@ -0,0 +1,78 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +from odoo.addons.component.core import Component + + +class PartnerMapper(Component): + _name = "fake.partner.mapper" + _inherit = "importer.base.mapper" + _apply_on = "res.partner" + + required = {"fullname": "name", "id": "ref"} + + defaults = [("is_company", False)] + + direct = [("id", "ref"), ("fullname", "name")] + + def finalize(self, map_record, values): + res = super().finalize(map_record, values) + # allow easy simulation of broken import + if self.env.context.get("_test_break_import"): + raise ValueError(self.env.context.get("_test_break_import")) + return res + + +class PartnerRecordImporter(Component): + _name = "fake.partner.importer" + _inherit = "importer.record" + _apply_on = "res.partner" + + odoo_unique_key = "ref" + + def create_context(self): + return {"tracking_disable": True} + + write_context = create_context + + +# Same component but with the "id" source column handled as an XML-ID + + +class PartnerMapperXMLID(Component): + _name = "fake.partner.mapper.xmlid" + _inherit = "importer.base.mapper" + _apply_on = "res.partner" + + required = {"fullname": "name"} + + defaults = [("is_company", False)] + + direct = [("id", "id"), ("id", "ref"), ("fullname", "name")] + + +class PartnerRecordImporterXMLID(Component): + _name = "fake.partner.importer.xmlid" + _inherit = "importer.record" + _apply_on = "res.partner" + + odoo_unique_key = "id" + + def create_context(self): + return {"tracking_disable": True} + + def prepare_line(self, line): + res = super().prepare_line(line) + res["id"] = "__import__." + line["id"] + return res + + write_context = create_context + + +class FakeModelMapper(Component): + _name = "fake.model.mapper" + _inherit = "importer.base.mapper" + _apply_on = "fake.imported.model" + + direct = [("fullname", "name")] diff --git a/connector_importer/tests/fake_models.py b/connector_importer/tests/fake_models.py new file mode 100644 index 000000000..59468ee69 --- /dev/null +++ b/connector_importer/tests/fake_models.py @@ -0,0 +1,47 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
+
+from odoo import fields, models
+
+
+class FakeSourceConsumer(models.Model):
+    _name = "fake.source.consumer"
+    _inherit = "import.source.consumer.mixin"
+    _description = "Fake source consumer"
+
+    name = fields.Char()
+
+
+class FakeSourceStatic(models.Model):
+    _name = "fake.source.static"
+    _inherit = "import.source"
+    _source_type = "static"
+    _description = "Fake static source"
+
+    fake_param = fields.Char()
+
+    @property
+    def _config_summary_fields(self):
+        return super()._config_summary_fields + ["fake_param"]
+
+    def _get_lines(self):
+        for i in range(1, 21):
+            yield {
+                "id": i,
+                "fullname": f"Fake line #{i}",
+                "address": f"Some fake place, {i}",
+            }
+
+    def _sort_lines(self, lines):
+        return reversed(list(lines))
+
+
+class FakeImportedModel(models.Model):
+    _name = "fake.imported.model"
+    _description = "Fake model"
+
+    name = fields.Char()
diff --git a/connector_importer/tests/fixtures/csv_source_test1.csv new file mode 100644 index 000000000..88c3cc993 --- /dev/null +++ b/connector_importer/tests/fixtures/csv_source_test1.csv @@ -0,0 +1,6 @@
+id,fullname
+1,Marty McFly
+2,Biff Tannen
+3,Emmet Brown
+4,Clara Clayton
+5,George McFly
diff --git a/connector_importer/tests/test_backend.py new file mode 100644 index 000000000..02d5e3f63 --- /dev/null +++ b/connector_importer/tests/test_backend.py @@ -0,0 +1,43 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+import odoo.tests.common as common
+from odoo.tools.misc import mute_logger
+
+
+class TestBackend(common.TransactionCase):
+    @classmethod
+    def setUpClass(cls):
+        super().setUpClass()
+        cls.backend_model = cls.env["import.backend"]
+
+    def test_backend_create(self):
+        bknd = self.backend_model.create({"name": "Foo", "version": "1.0"})
+        self.assertTrue(bknd)
+
+    @mute_logger("odoo.models.unlink")
+    def test_backend_cron_cleanup_recordsets(self):
+        # create a backend
+        bknd = self.backend_model.create(
+            {"name": "Foo", "version": "1.0", "cron_cleanup_keep": 3}
+        )
+        itype = self.env["import.type"].create({"name": "Fake", "key": "fake"})
+        # and 5 recordsets
+        for x in range(5):
+            rec = self.env["import.recordset"].create(
+                {"backend_id": bknd.id, "import_type_id": itype.id}
+            )
+            # make sure the create date is increasing
+            rec.create_date = "2018-01-01 00:00:0" + str(x)
+        self.assertEqual(len(bknd.recordset_ids), 5)
+        # clean them up
+        bknd.cron_cleanup_recordsets()
+        recsets = bknd.recordset_ids.mapped("name")
+        # we should find only 3 records, with #1 and #2 gone
+        self.assertEqual(len(recsets), 3)
+        self.assertNotIn("Foo #1", recsets)
+        self.assertNotIn("Foo #2", recsets)
+
+    # TODO
+    # def test_job_running_unlink_lock(self):
diff --git a/connector_importer/tests/test_cron.py new file mode 100644 index 000000000..e187d9083 --- /dev/null +++ b/connector_importer/tests/test_cron.py @@ -0,0 +1,43 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+ + +import odoo.tests.common as common +from odoo import fields + + +class TestBackendCron(common.TransactionCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.backend_model = cls.env["import.backend"] + cls.bknd = cls.backend_model.create( + { + "name": "Croned one", + "version": "1.0", + "cron_mode": True, + "cron_start_date": "2018-01-01", + "cron_interval_type": "days", + "cron_interval_number": 2, + } + ) + + def test_backend_cron_create(self): + cron = self.bknd.cron_id + expected_nextcall = fields.Datetime.from_string("2018-01-01 00:00:00") + self.assertTrue(cron) + self.assertEqual(cron.nextcall, expected_nextcall) + self.assertEqual(cron.interval_type, "days") + self.assertEqual(cron.interval_number, 2) + self.assertEqual(cron.code, "model.run_cron(%d)" % self.bknd.id) + + def test_backend_cron_update(self): + expected_nextcall = fields.Datetime.from_string("2018-05-01") + self.bknd.write( + {"cron_start_date": expected_nextcall, "cron_interval_type": "weeks"} + ) + cron = self.bknd.cron_id + self.assertTrue(cron) + self.assertEqual(cron.nextcall, expected_nextcall) + self.assertEqual(cron.interval_type, "weeks") diff --git a/connector_importer/tests/test_event_listeners.py b/connector_importer/tests/test_event_listeners.py new file mode 100644 index 000000000..c00bbc39a --- /dev/null +++ b/connector_importer/tests/test_event_listeners.py @@ -0,0 +1,195 @@ +# Author: Simone Orsi +# Copyright 2023 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +from unittest import mock + +from odoo_test_helper import FakeModelLoader + +from odoo.tools import mute_logger + +from odoo.addons.component.core import WorkContext + +from .common import TestImporterBase + +MOD_PATH = "odoo.addons.connector_importer" +LISTENER_PATH = MOD_PATH + ".components.listeners.ImportRecordsetEventListener" +MOCKED_LOG_ENTRIES = [] + + +class TestRecordImporter(TestImporterBase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.loader = FakeModelLoader(cls.env, cls.__module__) + cls.loader.backup_registry() + # fmt: off + from .fake_models import FakeImportedModel + cls.loader.update_registry((FakeImportedModel,)) + cls.fake_imported_model = cls.env[FakeImportedModel._name] + # fmt: on + # generate 20 records + cls.fake_lines = cls._fake_lines(cls, 20, keys=("id", "fullname")) + cls.action_recset = cls.env["ir.actions.server"].create( + { + "name": "Run after import - recordset", + "model_id": cls.env.ref("connector_importer.model_import_recordset").id, + "state": "code", + "code": """ +msg = "Exec for recordset: " + str(recordset.id) +log(msg) + """, + } + ) + cls.action_partner = cls.env["ir.actions.server"].create( + { + "name": "Run after import - partner", + "model_id": cls.env.ref("base.model_res_partner").id, + "state": "code", + "code": """ +msg = "Exec for recordset: " + str(env.context["import_recordset_id"]) +msg += ". 
Partners: " + str(records.ids) +log(msg) + """, + } + ) + cls.import_type.write( + { + "options": f""" +- model: res.partner + importer: + name: fake.partner.importer +- model: {FakeImportedModel._name} + options: + record_handler: + match_domain: "[('name', '=', values['name'])]" + """ + } + ) + + @classmethod + def tearDownClass(cls): + cls.loader.restore_registry() + super().tearDownClass() + + def setUp(self): + super().setUp() + # The components registry will be handled by the + # `import.record.import_record()' method when initializing its + # WorkContext + self.record = self.env["import.record"].create( + {"recordset_id": self.recordset.id} + ) + self.record.set_data(self.fake_lines) + global MOCKED_LOG_ENTRIES + MOCKED_LOG_ENTRIES = [] + + def _get_components(self): + from .fake_components import ( + FakeModelMapper, + PartnerMapper, + PartnerRecordImporter, + ) + + return [PartnerRecordImporter, PartnerMapper, FakeModelMapper] + + @mute_logger("[importer]") + def test_server_action_no_trigger(self): + with mock.patch(LISTENER_PATH + "._add_after_commit_hook") as mocked: + self.record.run_import() + mocked.assert_not_called() + + @mute_logger("[importer]") + def test_server_action_trigger_last_1_action(self): + self.recordset.server_action_ids += self.action_recset + self.recordset.server_action_trigger_on = "last_importer_done" + mocked_hook = mock.patch(LISTENER_PATH + "._add_after_commit_hook") + with mocked_hook as mocked: + self.record.run_import() + self.assertEqual(mocked.call_count, 1) + self.assertEqual( + mocked.call_args[0], + (self.recordset.id, self.action_recset.id, [self.recordset.id]), + ) + + @mute_logger("[importer]") + def test_server_action_trigger_last_2_actions(self): + self.recordset.server_action_ids += self.action_recset + self.recordset.server_action_ids += self.action_partner + self.recordset.server_action_trigger_on = "last_importer_done" + mocked_hook = mock.patch(LISTENER_PATH + "._add_after_commit_hook") + with mocked_hook as mocked: + self.record.run_import() + self.assertEqual(mocked.call_count, 2) + partner_report = self.recordset.get_report_by_model("res.partner") + record_ids = sorted( + set(partner_report["created"] + partner_report["updated"]) + ) + self.assertEqual( + mocked.call_args_list[0][0], + (self.recordset.id, self.action_partner.id, record_ids), + ) + self.assertEqual( + mocked.call_args_list[1][0], + (self.recordset.id, self.action_recset.id, [self.recordset.id]), + ) + + @mute_logger("[importer]") + def test_server_action_trigger_each(self): + self.recordset.server_action_ids += self.action_recset + self.recordset.server_action_trigger_on = "each_importer_done" + mocked_hook = mock.patch(LISTENER_PATH + "._add_after_commit_hook") + with mocked_hook as mocked: + self.record.run_import() + self.assertEqual(mocked.call_count, 2) + + @staticmethod + def _mocked_get_eval_context(self, orig_meth, action=None): + global MOCKED_LOG_ENTRIES + res = orig_meth(action) + res["log"] = lambda x: MOCKED_LOG_ENTRIES.append(x) + return res + + @mute_logger("[importer]") + def test_server_action_call_from_hook(self): + global MOCKED_LOG_ENTRIES + listener = WorkContext( + components_registry=self.comp_registry, + collection=self.backend, + model_name="import.recordset", + ).component_by_name("recordset.event.listener") + record_ids = self.env["res.partner"].search([], limit=10).ids + action = self.action_partner + # When mocking the ctx is not preserved as we pass the action straight. 
+ # Hence, we must replicate the same ctx that will be passed by the listener. + action = action.with_context( + **listener._run_server_action_ctx(self.recordset.id, action.id, record_ids) + ) + orig_meth = action._get_eval_context + mock_eval_ctx = mock.patch.object( + type(self.env["ir.actions.server"]), + "_get_eval_context", + wraps=lambda x: self._mocked_get_eval_context(x, orig_meth, action=action), + spec=True, + ) + with mock_eval_ctx: + listener._run_server_action(self.recordset.id, action.id, record_ids) + self.assertEqual( + MOCKED_LOG_ENTRIES[0], + f"Exec for recordset: {self.recordset.id}. Partners: {str(record_ids)}", + ) + + def test_post_commit_hook_registration(self): + listener = WorkContext( + components_registry=self.comp_registry, + collection=self.backend, + model_name="import.recordset", + ).component_by_name("recordset.event.listener") + listener._add_after_commit_hook( + self.recordset.id, self.action_partner.id, [1, 2, 3] + ) + callback = self.env.cr.postcommit._funcs.pop() + self.assertEqual(callback.func.__name__, "_run_server_action_post_commit") + self.assertEqual( + callback.args, (self.recordset.id, self.action_partner.id, [1, 2, 3]) + ) diff --git a/connector_importer/tests/test_import_type.py b/connector_importer/tests/test_import_type.py new file mode 100644 index 000000000..312208dc9 --- /dev/null +++ b/connector_importer/tests/test_import_type.py @@ -0,0 +1,133 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +from psycopg2 import IntegrityError + +import odoo.tests.common as common +from odoo.tools import mute_logger + + +class TestImportType(common.TransactionCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.type_model = cls.env["import.type"] + + @mute_logger("odoo.sql_db") + def test_unique_constrain(self): + self.type_model.create({"name": "Ok", "key": "ok"}) + with self.assertRaises(IntegrityError): + self.type_model.create({"name": "Duplicated Ok", "key": "ok"}) + + def test_available_importers_defaults(self): + options = """ + - model: res.partner + - model: res.users + options: + importer: + baz: True + """ + itype = self.type_model.create({"name": "Ok", "key": "ok", "options": options}) + importers = tuple(itype.available_importers()) + expected = ( + { + "context": {}, + "importer": {"name": "importer.record"}, + "is_last_importer": False, + "model": "res.partner", + "options": { + "importer": {}, + "mapper": {}, + "record_handler": {}, + "tracking_handler": {}, + }, + }, + { + "context": {}, + "importer": {"name": "importer.record"}, + "is_last_importer": True, + "model": "res.users", + "options": { + "importer": {"baz": True}, + "mapper": {}, + "record_handler": {}, + "tracking_handler": {}, + }, + }, + ) + self.assertEqual( + importers, + expected, + ) + + def test_available_importers(self): + options = """ + - model: res.partner + importer: + name: fake.partner.importer + - model: res.users + importer: + name: + user.importer + options: + importer: + baz: True + record_handler: + bar: False + - model: another.one + importer: + name: import.withspaces + context: + foo: True + """ + itype = self.type_model.create({"name": "Ok", "key": "ok", "options": options}) + importers = tuple(itype.available_importers()) + expected = ( + { + "importer": { + "name": "fake.partner.importer", + }, + "model": "res.partner", + "is_last_importer": False, + "context": {}, + "options": { + "importer": {}, + "mapper": {}, + "record_handler": {}, + 
"tracking_handler": {}, + }, + }, + { + "importer": { + "name": "user.importer", + }, + "model": "res.users", + "is_last_importer": False, + "context": {}, + "options": { + "importer": {"baz": True}, + "mapper": {}, + "record_handler": {"bar": False}, + "tracking_handler": {}, + }, + }, + { + "importer": { + "name": "import.withspaces", + }, + "model": "another.one", + "is_last_importer": True, + "context": {"foo": 1}, + "options": { + "importer": {}, + "mapper": {}, + "record_handler": {}, + "tracking_handler": {}, + }, + }, + ) + self.assertEqual( + importers, + expected, + ) diff --git a/connector_importer/tests/test_mapper.py b/connector_importer/tests/test_mapper.py new file mode 100644 index 000000000..366305585 --- /dev/null +++ b/connector_importer/tests/test_mapper.py @@ -0,0 +1,199 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +from odoo.tests.common import RecordCapturer +from odoo.tools import DotDict + +from .common import TestImporterBase + +MOD_PATH = "odoo.addons.connector_importer" +RECORD_MODEL = MOD_PATH + ".models.record.ImportRecord" + + +class TestRecordsetImporter(TestImporterBase): + @classmethod + def _setup_records(cls): + res = super()._setup_records() + cls.record = cls.env["import.record"].create({"recordset_id": cls.recordset.id}) + return res + + def _get_importer(self, options=None): + options = options or {"importer": {}, "mapper": {}} + with self.backend.work_on( + self.record._name, + components_registry=self.comp_registry, + options=DotDict(options), + ) as work: + return work.component_by_name("importer.record", model_name="res.partner") + + def _get_mapper(self, options=None): + return self._get_importer(options=options)._get_mapper() + + def _get_dynamyc_mapper(self, options=None): + opts = {"name": "importer.mapper.dynamic"} + opts.update(options or {}) + return self._get_mapper(options=DotDict({"importer": {}, "mapper": opts})) + + # TODO: test basic mapper and automapper too + + def test_dynamic_mapper_clean_record(self): + mapper = self._get_dynamyc_mapper() + rec = { + "name": "John Doe", + "ref": "12345", + "_foo": "something", + "some_one": 1, + "some_two": 2, + } + expected = { + "name": "John Doe", + "ref": "12345", + "some_one": 1, + "some_two": 2, + } + self.assertEqual(mapper._clean_record(rec), expected) + # Whitelist + mapper = self._get_dynamyc_mapper( + options=dict(source_key_whitelist=["name", "ref"]) + ) + expected = { + "name": "John Doe", + "ref": "12345", + } + self.assertEqual(mapper._clean_record(rec), expected) + # Blacklist + mapper = self._get_dynamyc_mapper(options=dict(source_key_blacklist=["ref"])) + expected = { + "name": "John Doe", + "some_one": 1, + "some_two": 2, + } + self.assertEqual(mapper._clean_record(rec), expected) + # Prefix + mapper = self._get_dynamyc_mapper(options=dict(source_key_prefix="some_")) + expected = { + "some_one": 1, + "some_two": 2, + } + self.assertEqual(mapper._clean_record(rec), expected) + + def test_dynamic_mapper_non_mapped_keys(self): + mapper = self._get_dynamyc_mapper() + rec = { + "name": "John Doe", + "ref": "12345", + "_foo": "something", + "some_one": 1, + "some_two": 2, + } + clean_rec = mapper._clean_record(rec) + expected = ( + "name", + "ref", + "some_one", + "some_two", + ) + self.assertEqual(sorted(mapper._non_mapped_keys(clean_rec)), sorted(expected)) + + def test_dynamic_mapper_values(self): + mapper = self._get_dynamyc_mapper() + rec = {} + expected = {} + 
+        self.assertEqual(mapper.dynamic_fields(rec), expected)
+        mapper = self._get_dynamyc_mapper()
+        rec = {"name": "John Doe", "ref": "12345"}
+        expected = rec.copy()
+        self.assertEqual(mapper.dynamic_fields(rec), expected)
+        mapper = self._get_dynamyc_mapper()
+        categs = self.env.ref("base.res_partner_category_0") + self.env.ref(
+            "base.res_partner_category_2"
+        )
+        rec = {
+            "name": "John Doe",
+            "ref": "12345",
+            "xid::parent_id": "base.res_partner_10",
+            "xid::category_id": """
+                base.res_partner_category_0,base.res_partner_category_2
+            """,
+            "title_id": "Doctor",
+        }
+        expected = {
+            "name": "John Doe",
+            "ref": "12345",
+            "parent_id": self.env.ref("base.res_partner_10").id,
+            "category_id": [(6, 0, categs.ids)],
+        }
+        self.assertEqual(mapper.dynamic_fields(rec), expected)
+
+    def test_dynamic_mapper_values_with_prefix(self):
+        mapper = self._get_dynamyc_mapper(options=dict(source_key_prefix="foo."))
+        rec = {}
+        expected = {}
+        categs = self.env.ref("base.res_partner_category_0") + self.env.ref(
+            "base.res_partner_category_2"
+        )
+        rec = {
+            "foo.name": "John Doe",
+            "ref": "12345",
+            "xid::foo.parent_id": "base.res_partner_10",
+            "xid::foo.category_id": """
+                base.res_partner_category_0,base.res_partner_category_2
+            """,
+            "title_id": "Doctor",
+        }
+        expected = {
+            "name": "John Doe",
+            "parent_id": self.env.ref("base.res_partner_10").id,
+            "category_id": [(6, 0, categs.ids)],
+        }
+        self.assertEqual(mapper.dynamic_fields(rec), expected)
+
+    def test_dynamic_mapper_skip_empty(self):
+        rec = {
+            "name": "John Doe",
+            "ref": "",
+        }
+        # the empty `ref` value is skipped
+        expected = {
+            "name": "John Doe",
+        }
+        mapper = self._get_dynamyc_mapper(options=dict(source_key_empty_skip=["ref"]))
+        self.assertEqual(mapper.dynamic_fields(rec), expected)
+
+    def test_rel_create_if_missing(self):
+        opts = {
+            "parent_id": {"create_missing": True},
+            "category_id": {"create_missing": True},
+        }
+        mapper = self._get_dynamyc_mapper(options=dict(converter=opts))
+        rec = {
+            "name": "John Doe",
+            "ref": "12345",
+            "parent_id": "Parent of J. Doe",
+            "category_id": "New category",
+        }
+        with (
+            RecordCapturer(self.env["res.partner"].sudo(), []) as partner_capt,
+            RecordCapturer(self.env["res.partner.category"].sudo(), []) as cat_capt,
+        ):
+            res = mapper.dynamic_fields(rec)
+        parent = partner_capt.records
+        cat = cat_capt.records
+        self.assertEqual(parent.name, "Parent of J. Doe")
+        self.assertEqual(cat.name, "New category")
+        self.assertEqual(res["parent_id"], parent.id)
+        self.assertEqual(res["category_id"], [(6, 0, [cat.id])])
+
+    def test_dynamic_mapper_rename_keys(self):
+        rec = {
+            "another_name": "John Doe",
+        }
+        # `another_name` is renamed to `name`
+        expected = {
+            "name": "John Doe",
+        }
+        mapper = self._get_dynamyc_mapper(
+            options=dict(source_key_rename={"another_name": "name"})
+        )
+        self.assertEqual(mapper.dynamic_fields(rec), expected)
diff --git a/connector_importer/tests/test_record_handler.py new file mode 100644 index 000000000..7c54c19c9 --- /dev/null +++ b/connector_importer/tests/test_record_handler.py @@ -0,0 +1,77 @@
+# Author: Simone Orsi
+# Copyright 2023 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+ +from odoo.tools import DotDict + +from .common import TestImporterBase + +values = { + "name": "John", + "age": 40, +} +orig_values = { + "Name": "John ", + "Age": "40", +} + + +class TestRecordImporter(TestImporterBase): + @classmethod + def _setup_records(cls): # pylint: disable=missing-return + super()._setup_records() + cls.record = cls.env["import.record"].create({"recordset_id": cls.recordset.id}) + + def _get_components(self): + from .fake_components import PartnerMapper, PartnerRecordImporter + + return [PartnerRecordImporter, PartnerMapper] + + def _get_handler(self): + with self.backend.work_on( + self.record._name, + components_registry=self.comp_registry, + options=DotDict({"record_handler": {}}), + ) as work: + return work.component(usage="odoorecord.handler", model_name="res.partner") + + def test_match_domain(self): + handler = self._get_handler() + domain = handler._odoo_find_domain_from_options(values, orig_values) + self.assertEqual(domain, []) + handler.work.options["record_handler"] = { + "match_domain": """ + [('name', '=', values['name']), ('age', '=', orig_values['Age'])] + """ + } + domain = handler._odoo_find_domain_from_options(values, orig_values) + self.assertEqual( + domain, [("name", "=", values["name"]), ("age", "=", orig_values["Age"])] + ) + + def test_unique_key_domain(self): + handler = self._get_handler() + handler.unique_key = "nowhere" + with self.assertRaises(ValueError): + domain = handler._odoo_find_domain_from_unique_key(values, orig_values) + handler.unique_key = "name" + domain = handler._odoo_find_domain_from_unique_key(values, orig_values) + self.assertEqual(domain, [("name", "=", values["name"])]) + handler.unique_key = "Name" + domain = handler._odoo_find_domain_from_unique_key(values, orig_values) + self.assertEqual(domain, [("Name", "=", orig_values["Name"])]) + + def test_find_domain(self): + handler = self._get_handler() + handler.unique_key = "age" + domain = handler.odoo_find_domain(values, orig_values) + self.assertEqual(domain, [("age", "=", values["age"])]) + handler.work.options["record_handler"] = { + "match_domain": """ + [('name', '=', values['name']), ('age', '=', values['age'])] + """ + } + domain = handler.odoo_find_domain(values, orig_values) + self.assertEqual( + domain, [("name", "=", values["name"]), ("age", "=", values["age"])] + ) diff --git a/connector_importer/tests/test_record_importer.py b/connector_importer/tests/test_record_importer.py new file mode 100644 index 000000000..1a77393f6 --- /dev/null +++ b/connector_importer/tests/test_record_importer.py @@ -0,0 +1,136 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
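.. note:: As ``test_record_handler.py`` above shows, the ``match_domain``
   option is evaluated with both ``values`` (the mapped values) and
   ``orig_values`` (the raw source line) in scope, and it takes precedence
   over the ``odoo_unique_key`` lookup. A minimal sketch (editor's
   illustration)::

       options = {
           "record_handler": {
               "match_domain": "[('name', '=', values['name'])]",
           },
       }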
+
+from odoo.tools import mute_logger
+
+from .common import TestImporterBase
+
+MOD_PATH = "odoo.addons.connector_importer"
+RECORD_MODEL = MOD_PATH + ".models.record.ImportRecord"
+LOGGERS_TO_MUTE = (
+    "[importer]",
+    "odoo.addons.queue_job.utils",
+)
+
+
+class TestRecordImporter(TestImporterBase):
+    @classmethod
+    def setUpClass(cls):
+        super().setUpClass()
+        # generate 10 records
+        cls.fake_lines = cls._fake_lines(cls, 10, keys=("id", "fullname"))
+
+    def setUp(self):
+        super().setUp()
+        # The components registry will be handled by the
+        # `import.record.import_record()' method when initializing its
+        # WorkContext
+        self.record = self.env["import.record"].create(
+            {"recordset_id": self.recordset.id}
+        )
+
+    def _get_components(self):
+        from .fake_components import PartnerMapper, PartnerRecordImporter
+
+        return [PartnerRecordImporter, PartnerMapper]
+
+    @mute_logger(*LOGGERS_TO_MUTE)
+    def test_importer_create(self):
+        # set them on record
+        self.record.set_data(self.fake_lines)
+        res = self.record.run_import()
+        report = self.recordset.get_report()
+        # in any case we'll get this for each model if the import is not broken
+        model = "res.partner"
+        expected = {
+            model: {"created": 10, "errored": 0, "updated": 0, "skipped": 0},
+        }
+        result = res[model]
+        self.assertEqual(result, expected[model])
+        for k, v in expected[model].items():
+            self.assertEqual(len(report[model][k]), v)
+        self.assertEqual(self.env[model].search_count([("ref", "like", "id_%")]), 10)
+
+    @mute_logger(*LOGGERS_TO_MUTE)
+    def test_importer_create_debug_mode_off(self):
+        # set them on record
+        self.record.set_data(self.fake_lines)
+        self.record.backend_id.debug_mode = False
+        res = self.record._run_import(use_job=True)
+        self.recordset.get_report()
+        # in any case we'll get this for each model if the import is not broken
+        model = "res.partner"
+        expected = {
+            model: {"created": 10, "errored": 0, "updated": 0, "skipped": 0},
+        }
+        delayable = res[model]
+        result = delayable.perform()
+        self.assertEqual(result, expected[model])
+        result = {model: result}
+        for k, v in expected[model].items():
+            self.assertEqual(result[model][k], v)
+        self.assertEqual(self.env[model].search_count([("ref", "like", "id_%")]), 10)
+
+    @mute_logger(*LOGGERS_TO_MUTE)
+    def test_importer_skip(self):
+        # generate 10 records
+        lines = self._fake_lines(10, keys=("id", "fullname"))
+        # make a line skip
+        lines[0].pop("fullname")
+        lines[1].pop("id")
+        # set them on record
+        self.record.set_data(lines)
+        res = self.record.run_import()
+        report = self.recordset.get_report()
+        model = "res.partner"
+        expected = {model: {"created": 8, "errored": 0, "updated": 0, "skipped": 2}}
+        result = res[model]
+        self.assertEqual(result, expected[model])
+        for k, v in expected[model].items():
+            self.assertEqual(len(report[model][k]), v)
+        skipped_msg1 = report[model]["skipped"][0]["message"]
+        skipped_msg2 = report[model]["skipped"][1]["message"]
+        self.assertEqual(skipped_msg1, "MISSING REQUIRED SOURCE KEY=fullname: ref=id_1")
+        # `id` is missing, so the destination key `ref` is missing too
+        # and we don't see it in the message
+        self.assertEqual(skipped_msg2, "MISSING REQUIRED SOURCE KEY=id")
+        self.assertEqual(self.env[model].search_count([("ref", "like", "id_%")]), 8)
+
+    @mute_logger(*LOGGERS_TO_MUTE)
+    def test_importer_update(self):
+        # generate 10 records
+        lines = self._fake_lines(10, keys=("id", "fullname"))
+        self.record.set_data(lines)
+        res = self.record.run_import()
+        report = self.recordset.get_report()
+        model = "res.partner"
+        expected = {model: {"created": 10, "errored": 0, "updated": 0, "skipped": 0}}
+        result = res[model]
+        self.assertEqual(result, expected[model])
+        for k, v in expected[model].items():
+            self.assertEqual(len(report[model][k]), v)
+        # now run it a second time
+        # but we must flush the old report which is usually done
+        # by the recordset importer
+        self.recordset.set_report({}, reset=True)
+        res = self.record.run_import()
+        report = self.recordset.get_report()
+        expected = {model: {"created": 0, "errored": 0, "updated": 10, "skipped": 0}}
+        result = res[model]
+        self.assertEqual(result, expected[model])
+        for k, v in expected[model].items():
+            self.assertEqual(len(report[model][k]), v)
+        # now run it a third time
+        # but with `override_existing` set to False
+        self.recordset.set_report({}, reset=True)
+        self.recordset.override_existing = False
+        res = self.record.run_import()
+        report = self.recordset.get_report()
+        expected = {model: {"created": 0, "errored": 0, "updated": 0, "skipped": 10}}
+        result = res[model]
+        self.assertEqual(result, expected[model])
+        for k, v in expected[model].items():
+            self.assertEqual(len(report[model][k]), v)
+        skipped_msg1 = report[model]["skipped"][0]["message"]
+        self.assertEqual(skipped_msg1, "ALREADY EXISTS: ref=id_1")
diff --git a/connector_importer/tests/test_record_importer_basic.py b/connector_importer/tests/test_record_importer_basic.py
new file mode 100644
index 000000000..54d0f7242
--- /dev/null
+++ b/connector_importer/tests/test_record_importer_basic.py
@@ -0,0 +1,122 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+from odoo.tools import DotDict, mute_logger
+
+from .common import TestImporterBase
+
+
+class TestRecordImporter(TestImporterBase):
+    @classmethod
+    def _setup_records(cls):
+        res = super()._setup_records()
+        cls.record = cls.env["import.record"].create({"recordset_id": cls.recordset.id})
+        return res
+
+    def _get_components(self):
+        from .fake_components import PartnerMapper, PartnerRecordImporter
+
+        return [PartnerRecordImporter, PartnerMapper]
+
+    def _get_importer(self, options=None):
+        options = options or {"importer": {}, "mapper": {}}
+        with self.backend.work_on(
+            self.record._name,
+            components_registry=self.comp_registry,
+            options=DotDict(options),
+        ) as work:
+            return work.component(usage="record.importer", model_name="res.partner")
+
+    @mute_logger("[importer]")
+    def test_importer_lookup(self):
+        importer = self._get_importer()
+        self.assertEqual(importer._name, "fake.partner.importer")
+
+    @mute_logger("[importer]")
+    def test_importer_required_keys(self):
+        importer = self._get_importer()
+        required = importer.required_keys()
+        self.assertDictEqual(required, {"fullname": ("name",), "id": ("ref",)})
+
+    @mute_logger("[importer]")
+    def test_importer_check_missing_none(self):
+        importer = self._get_importer()
+        values = {"name": "John Doe", "ref": "doe"}
+        orig_values = {"fullname": "john doe", "id": "#doe"}
+        missing = importer._check_missing("id", "ref", values, orig_values)
+        self.assertFalse(missing)
+
+    @mute_logger("[importer]")
+    def test_importer_check_missing_source(self):
+        importer = self._get_importer()
+        values = {"name": "John Doe", "ref": "doe"}
+        orig_values = {"fullname": "john doe", "id": "#doe"}
+        fullname = orig_values.pop("fullname")
+        missing = importer._check_missing("fullname", "name", values, orig_values)
+        # the source key `fullname` is missing now
+        self.assertDictEqual(
+            missing, {"message": "MISSING REQUIRED SOURCE KEY=fullname: ref=doe"}
+        )
+
# put `fullname` back and drop `id`
+        orig_values["fullname"] = fullname
+        orig_values.pop("id")
+        missing = importer._check_missing("id", "ref", values, orig_values)
+        # the source key `id` is missing now; the destination key `ref`
+        # is still in `values`, so it does show up in the message
+        self.assertDictEqual(
+            missing, {"message": "MISSING REQUIRED SOURCE KEY=id: ref=doe"}
+        )
+
+    @mute_logger("[importer]")
+    def test_importer_check_missing_destination(self):
+        importer = self._get_importer()
+        values = {"name": "John Doe", "ref": "doe"}
+        orig_values = {"fullname": "john doe", "id": "#doe"}
+        name = values.pop("name")
+        missing = importer._check_missing("fullname", "name", values, orig_values)
+        # the destination key `name` is missing now
+        self.assertDictEqual(
+            missing, {"message": "MISSING REQUIRED DESTINATION KEY=name: ref=doe"}
+        )
+        # put `name` back and drop `ref`
+        values["name"] = name
+        values.pop("ref")
+        missing = importer._check_missing("id", "ref", values, orig_values)
+        # `ref` is missing and it is the reference key itself,
+        # so it cannot be shown in the message
+        self.assertDictEqual(
+            missing, {"message": "MISSING REQUIRED DESTINATION KEY=ref"}
+        )
+
+    def test_importer_get_mapper(self):
+        importer = self._get_importer()
+        mapper = importer._get_mapper()
+        self.assertEqual(mapper._name, "fake.partner.mapper")
+        importer.work.options["mapper"] = {"name": "importer.mapper.dynamic"}
+        mapper = importer._get_mapper()
+        self.assertEqual(mapper._name, "importer.mapper.dynamic")
+        importer.work.options["mapper"] = {"usage": "importer.dynamicmapper"}
+        mapper = importer._get_mapper()
+        self.assertEqual(mapper._name, "importer.mapper.dynamic")
+        # a name set via the class attribute takes precedence
+        importer._mapper_name = "fake.partner.mapper"
+        mapper = importer._get_mapper()
+        self.assertEqual(mapper._name, "fake.partner.mapper")
+
+    def test_importer_context(self):
+        importer = self._get_importer(
+            options={"importer": {"ctx": {"key1": 1, "key2": 2}}, "mapper": {}}
+        )
+        importer._init_importer(self.recordset)
+        self.assertEqual(
+            importer._odoo_create_context(),
+            {
+                "importer_type_id": self.recordset.import_type_id.id,
+                "tracking_disable": True,
+                "key1": 1,
+                "key2": 2,
+            },
+        )
diff --git a/connector_importer/tests/test_record_importer_xmlid.py b/connector_importer/tests/test_record_importer_xmlid.py
new file mode 100644
index 000000000..d602811bf
--- /dev/null
+++ b/connector_importer/tests/test_record_importer_xmlid.py
@@ -0,0 +1,62 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
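.. note:: The tests in ``test_record_importer_basic.py`` above pin down the
   mapper lookup order: the importer's ``_mapper_name`` class attribute wins,
   then the ``mapper.name`` option, then ``mapper.usage``. A sketch of the
   two option-based forms (editor's illustration)::

       # select the mapper by component name ...
       options = {"importer": {}, "mapper": {"name": "importer.mapper.dynamic"}}
       # ... or by component usage
       options = {"importer": {}, "mapper": {"usage": "importer.dynamicmapper"}}

   ``importer.ctx`` entries are merged into the record-creation context on
   top of ``tracking_disable`` and ``importer_type_id``.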
+ +from odoo.tools import mute_logger + +from .common import TestImporterBase + +LOGGERS_TO_MUTE = ( + "[importer]", + "odoo.addons.queue_job.utils", +) + + +class TestRecordImporterXMLID(TestImporterBase): + def setUp(self): + super().setUp() + # The components registry will be handled by the + # `import.record.import_record()' method when initializing its + # WorkContext + self.record = self.env["import.record"].create( + {"recordset_id": self.recordset.id} + ) + + def _get_components(self): + from .fake_components import PartnerMapperXMLID, PartnerRecordImporterXMLID + + return [ + PartnerMapperXMLID, + PartnerRecordImporterXMLID, + ] + + @mute_logger(*LOGGERS_TO_MUTE) + def test_importer_create(self): + self.import_type.write( + { + "options": """ +- model: res.partner + importer: + name: + fake.partner.importer.xmlid + """ + } + ) + # generate 10 records + count = 10 + lines = self._fake_lines(count, keys=("id", "fullname")) + # set them on record + self.record.set_data(lines) + res = self.record.run_import() + report = self.recordset.get_report() + model = "res.partner" + expected = {model: {"created": 10, "errored": 0, "updated": 0, "skipped": 0}} + result = res[model] + self.assertEqual(result, expected[model]) + for k, v in expected[model].items(): + self.assertEqual(len(report[model][k]), v) + self.assertEqual(self.env[model].search_count([("ref", "like", "id_%")]), 10) + # Check XML-IDs + for i in range(1, count + 1): + partner = self.env.ref(f"__import__.id_{i}", raise_if_not_found=False) + self.assertTrue(partner) diff --git a/connector_importer/tests/test_recordset.py b/connector_importer/tests/test_recordset.py new file mode 100644 index 000000000..723c95682 --- /dev/null +++ b/connector_importer/tests/test_recordset.py @@ -0,0 +1,115 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
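.. note:: ``test_record_importer_xmlid.py`` above shows the xid-aware
   importer at work: each imported line gets an external identifier in the
   ``__import__`` namespace, derived from its ``id`` column. Sketch (editor's
   illustration)::

       lines = [{"id": "id_1", "fullname": "John Doe"}]
       # after the import the record is resolvable by external ID
       partner = env.ref("__import__.id_1")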
+ +from markupsafe import Markup + +import odoo.tests.common as common + + +class TestRecordset(common.TransactionCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.recordset_model = cls.env["import.recordset"] + cls.backend_model = cls.env["import.backend"] + cls.type_model = cls.env["import.type"] + cls.bknd = cls._create_backend() + cls.itype = cls._create_type() + cls.recordset = cls._create_recordset() + + @classmethod + def _create_backend(cls): + return cls.backend_model.create({"name": "Foo", "version": "1.0"}) + + @classmethod + def _create_type(cls): + return cls.type_model.create( + { + "name": "Ok", + "key": "ok", + "options": """ +- model: res.partner + importer: + name: partner.importer + """, + } + ) + + @classmethod + def _create_recordset(cls): + return cls.recordset_model.create( + {"backend_id": cls.bknd.id, "import_type_id": cls.itype.id} + ) + + def test_recordset_name(self): + self.assertEqual( + self.recordset.name, + "#" + str(self.recordset.id), + ) + + def test_available_importers(self): + """Available models are propagated from import type.""" + self.assertEqual( + tuple(self.recordset.available_importers()), + tuple(self.recordset.import_type_id.available_importers()), + ) + + def test_get_set_raw_report(self): + val = {"baz": "bar"} + # store report + self.recordset.set_report(val) + # retrieve it, should be the same + self.assertEqual(self.recordset.get_report(), val) + new_val = {"foo": "boo"} + # set a new value + self.recordset.set_report(new_val) + merged = val.copy() + merged.update(new_val) + # by default previous value is preserved and merged w/ the new one + self.assertDictEqual(self.recordset.get_report(), merged) + # unless we use `reset` + val = {"goo": "gle"} + # store report + self.recordset.set_report(val, reset=True) + self.assertDictEqual(self.recordset.get_report(), val) + + def test_get_report_html(self): + val = { + "_last_start": "2018-01-20", + "res.partner": { + "errored": list(range(10)), + "skipped": list(range(4)), + "updated": list(range(20)), + "created": list(range(2)), + }, + } + self.recordset.set_report(val) + data = self.recordset._get_report_html_data() + self.assertEqual(data["recordset"], self.recordset) + self.assertEqual(data["last_start"], "2018-01-20") + by_model = data["report_by_model"] + key = list(by_model.keys())[0] + self.assertEqual(key._name, "ir.model") + self.assertEqual(key.model, "res.partner") + self.assertTrue(isinstance(self.recordset.report_html, Markup)) + + def test_importable_models(self): + self.itype.write( + { + "options": """ +- model: res.partner + importer: + name: partner.importer +- model: res.partner.category +- model: res.lang + """ + } + ) + expected = ("res.partner", "res.lang", "res.partner.category") + models = self.recordset.importable_model_ids.mapped("model") + for model in expected: + self.assertIn(model, models) + models = self.recordset.server_action_importable_model_ids.mapped("model") + for model in expected + ("import.recordset",): + self.assertIn(model, models) diff --git a/connector_importer/tests/test_recordset_importer.py b/connector_importer/tests/test_recordset_importer.py new file mode 100644 index 000000000..60c5a70a8 --- /dev/null +++ b/connector_importer/tests/test_recordset_importer.py @@ -0,0 +1,88 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
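.. note:: ``test_recordset.py`` above documents the report API semantics:
   ``set_report`` merges with the stored value unless ``reset`` is passed.
   In short (editor's illustration)::

       recordset.set_report({"baz": "bar"})
       recordset.set_report({"foo": "boo"})  # merged with the previous value
       recordset.get_report()  # -> {"baz": "bar", "foo": "boo"}
       recordset.set_report({"goo": "gle"}, reset=True)  # wipes the report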
+
+from unittest import mock
+
+from markupsafe import Markup
+
+from odoo.tools import mute_logger
+
+from odoo.addons.queue_job.job import Job
+
+from .common import TestImporterBase
+from .fake_components import PartnerMapper, PartnerRecordImporter
+
+MOD_PATH = "odoo.addons.connector_importer"
+RECORD_MODEL = MOD_PATH + ".models.record.ImportRecord"
+
+
+class TestRecordsetImporter(TestImporterBase):
+    def _get_components(self):
+        return [PartnerMapper, PartnerRecordImporter]
+
+    def test_docs_html(self):
+        self.assertTrue(isinstance(self.recordset.docs_html, Markup))
+
+    @mute_logger("[importer]")
+    @mock.patch(f"{RECORD_MODEL}.run_import")
+    def test_recordset_importer(self, mocked_run_import):
+        # generate 100 records
+        lines = self._fake_lines(100, keys=("id", "fullname"))
+        # source will provide 5x20 chunks
+        self._patch_get_source(lines, chunk_size=20)
+        # run the recordset importer
+        with self.backend.work_on(
+            "import.recordset", components_registry=self.comp_registry
+        ) as work:
+            importer = work.component(usage="recordset.importer")
+            self.assertTrue(importer)
+            importer.run(self.recordset)
+        mocked_run_import.assert_called()
+        # we expect 5 records w/ 20 lines each
+        records = self.recordset.get_records()
+        self.assertEqual(len(records), 5)
+        for rec in records:
+            data = rec.get_data()
+            self.assertEqual(len(data), 20)
+        # order is preserved
+        data1 = records[0].get_data()
+        self.assertEqual(data1[0]["id"], "id_1")
+        self.assertEqual(data1[0]["fullname"], "fullname_1")
+        # run the recordset importer a second time
+        # and make sure old records are wiped
+        with self.backend.work_on(
+            "import.recordset", components_registry=self.comp_registry
+        ) as work:
+            importer = work.component(usage="recordset.importer")
+            self.assertTrue(importer)
+            importer.run(self.recordset)
+        # we expect 5 records w/ 20 lines each
+        records = self.recordset.get_records()
+        self.assertEqual(len(records), 5)
+
+    @mute_logger("[importer]")
+    def test_job_state(self):
+        self.backend.debug_mode = False
+        # generate 100 records
+        lines = self._fake_lines(100, keys=("id", "fullname"))
+        # source will provide 5x20 chunks
+        self._patch_get_source(lines, chunk_size=20)
+        self.recordset.run_import()
+        self.assertFalse(self.recordset.record_ids)
+        self.assertEqual(self.recordset.job_id.state, "pending")
+        self.assertEqual(self.recordset.job_state, "pending")
+        self.assertEqual(self.recordset.jobs_global_state, "no_job")
+        Job.load(self.env, self.recordset.job_id.uuid).perform()
+        self.assertTrue(self.recordset.record_ids)
+        self.assertEqual(self.recordset.jobs_global_state, "pending")
+        # perform each job in sequence and check the global state
+        records = self.recordset.record_ids
+        for record in records:
+            job = Job.load(self.env, record.job_id.uuid)
+            job.set_done()
+            job.store()
+            expected_state = "pending"
+            if record == records[-1]:
+                expected_state = "done"
+            self.assertEqual(self.recordset.jobs_global_state, expected_state)
diff --git a/connector_importer/tests/test_source.py b/connector_importer/tests/test_source.py
new file mode 100644
index 000000000..b96d03f69
--- /dev/null
+++ b/connector_importer/tests/test_source.py
@@ -0,0 +1,87 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+ + +from unittest import mock + +from odoo_test_helper import FakeModelLoader + +from .common import BaseTestCase + +MOD_PATH = "odoo.addons.connector_importer.models" +SOURCE_MODEL = MOD_PATH + ".sources.source_consumer_mixin.ImportSourceConsumerMixin" + + +class TestSource(BaseTestCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.loader = FakeModelLoader(cls.env, cls.__module__) + cls.loader.backup_registry() + # fmt: off + from .fake_models import FakeSourceConsumer, FakeSourceStatic + cls.loader.update_registry(( + FakeSourceConsumer, + FakeSourceStatic + )) + # fmt: on + cls.source = cls._create_source() + cls.consumer = cls._create_consumer() + + @classmethod + def tearDownClass(cls): + cls.loader.restore_registry() + return super().tearDownClass() + + @classmethod + def _create_source(cls): + return cls.env["fake.source.static"].create( + {"fake_param": "some_condition", "chunk_size": 5} + ) + + @classmethod + def _create_consumer(cls): + return cls.env["fake.source.consumer"].create({}) + + def test_source_basic(self): + source = self.source + self.assertEqual(source.name, "static") + self.assertItemsEqual( + source._config_summary_fields, ["chunk_size", "fake_param"] + ) + + def test_source_get_lines(self): + source = self.source + lines = list(source.get_lines()) + # 20 records, chunk size 5 + self.assertEqual(len(lines), 4) + # custom sorting: reversed + self.assertEqual(lines[0][0]["id"], 20) + + def test_source_summary_data(self): + source = self.source + data = source._config_summary_data() + self.assertEqual(data["source"], source) + self.assertEqual( + sorted(data["summary_fields"]), sorted(["chunk_size", "fake_param"]) + ) + self.assertIn("chunk_size", data["fields_info"]) + self.assertIn("fake_param", data["fields_info"]) + + def test_config_summary(self): + html = self.source.config_summary + self.assertEqual(html.__class__.__name__, "Markup") + + @mock.patch(SOURCE_MODEL + "._selection_source_ref_id") + def test_consumer_basic(self, _selection_source_ref_id): + # Needed to let `odoo.fields.determine` work properly + _selection_source_ref_id.__name__ = "_selection_source_ref_id" + # enable our fake source + _selection_source_ref_id.return_value = [(self.source._name, "Fake")] + consumer = self.consumer + self.assertFalse(consumer.get_source()) + consumer.update( + {"source_id": self.source.id, "source_model": self.source._name} + ) + self.assertEqual(consumer.get_source(), self.source) diff --git a/connector_importer/tests/test_source_csv.py b/connector_importer/tests/test_source_csv.py new file mode 100644 index 000000000..9631ad39a --- /dev/null +++ b/connector_importer/tests/test_source_csv.py @@ -0,0 +1,95 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
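.. note:: The fake source used by ``test_source.py`` above illustrates the
   contract of a source model: declare configuration fields and yield raw
   lines, while the source mixin handles chunking via ``chunk_size``. A rough
   sketch (editor's illustration; the mixin name and hook are assumptions
   based on these tests)::

       from odoo import fields, models

       class FakeSourceStatic(models.Model):
           _name = "fake.source.static"
           _inherit = "import.source"  # assumed chunking mixin

           fake_param = fields.Char()

           def _get_lines(self):
               # `get_lines()` wraps this into `chunk_size`-sized chunks
               for i in reversed(range(1, 21)):  # custom sorting: reversed
                   yield {"id": i, "fullname": f"fullname_{i}"}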
+ +import base64 + +from odoo_test_helper import FakeModelLoader + +from odoo.tools import mute_logger + +from .common import BaseTestCase + + +class TestSourceCSV(BaseTestCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.loader = FakeModelLoader(cls.env, cls.__module__) + cls.loader.backup_registry() + # fmt: off + from .fake_models import FakeSourceConsumer + cls.loader.update_registry(( + FakeSourceConsumer, + )) + # fmt: on + cls.source = cls._create_source() + cls.consumer = cls._create_consumer() + + @classmethod + def tearDownClass(cls): + cls.loader.restore_registry() + return super().tearDownClass() + + @classmethod + def _create_source(cls): + filecontent = cls.load_filecontent( + "connector_importer", "tests/fixtures/csv_source_test1.csv", mode="rb" + ) + source = cls.env["import.source.csv"].create( + {"csv_file": base64.encodebytes(filecontent)} + ) + source._onchange_csv_file() + return source + + @classmethod + def _create_consumer(cls): + return cls.env["fake.source.consumer"].create({"name": "Foo"}) + + extra_fields = [ + "chunk_size", + "csv_filesize", + "csv_filename", + "csv_delimiter", + "csv_quotechar", + "csv_encoding", + ] + + @mute_logger("[importer]") + def test_source_basic(self): + source = self.source + self.assertEqual(source.name, "csv") + self.assertItemsEqual(source._config_summary_fields, self.extra_fields) + self.assertEqual(source.csv_delimiter, ",") + self.assertEqual(source.csv_quotechar, '"') + + @mute_logger("[importer]") + def test_source_get_lines(self): + source = self.source + # call private method to skip chunking, pointless here + lines = list(source._get_lines()) + self.assertEqual(len(lines), 5) + self.assertDictEqual( + lines[0], {"id": "1", "fullname": "Marty McFly", "_line_nr": 2} + ) + self.assertDictEqual( + lines[1], {"id": "2", "fullname": "Biff Tannen", "_line_nr": 3} + ) + self.assertDictEqual( + lines[2], {"id": "3", "fullname": "Emmet Brown", "_line_nr": 4} + ) + self.assertDictEqual( + lines[3], {"id": "4", "fullname": "Clara Clayton", "_line_nr": 5} + ) + self.assertDictEqual( + lines[4], {"id": "5", "fullname": "George McFly", "_line_nr": 6} + ) + + def test_source_summary_data(self): + source = self.source + data = source._config_summary_data() + self.assertEqual(data["source"], source) + self.assertItemsEqual(data["summary_fields"], self.extra_fields) + self.assertItemsEqual( + sorted(self.extra_fields), sorted(data["fields_info"].keys()) + ) diff --git a/connector_importer/utils/__init__.py b/connector_importer/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/connector_importer/utils/import_utils.py b/connector_importer/utils/import_utils.py new file mode 100644 index 000000000..6df4f9a4b --- /dev/null +++ b/connector_importer/utils/import_utils.py @@ -0,0 +1,141 @@ +# Author: Simone Orsi +# Copyright 2018 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +import csv +import io +import time + +from ..log import logger + +try: + from chardet.universaldetector import UniversalDetector +except ImportError: + import logging + + _logger = logging.getLogger(__name__) + _logger.debug("`chardet` lib is missing") + + +def get_encoding(data): + """Try to get encoding incrementally. + + See http://chardet.readthedocs.org/en/latest/usage.html + #example-detecting-encoding-incrementally # noqa + """ + start = time.time() + msg = "detecting file encoding..." 
+    logger.info(msg)
+    file_like = io.BytesIO(data)
+    detector = UniversalDetector()
+    for _i, line in enumerate(file_like):
+        detector.feed(line)
+        if detector.done:
+            break
+    detector.close()
+    msg = f"encoding found in {str(time.time() - start)} sec"
+    msg += str(detector.result)
+    logger.info(msg)
+    return detector.result
+
+
+def csv_content_to_file(data, encoding=None):
+    """Odoo binary fields spit out b64 data."""
+    # guess encoding via chardet (LOVE IT! :))
+    if not encoding:
+        encoding_info = get_encoding(data)
+        encoding = encoding_info["encoding"]
+    if encoding is None or encoding != "utf-8":
+        try:
+            data_str = data.decode(encoding)
+        except (UnicodeDecodeError, TypeError):
+            # dirty fallback in case
+            # we don't spot the right encoding above
+            for enc in ("utf-16le", "latin-1", "ascii"):
+                try:
+                    data_str = data.decode(enc)
+                    break
+                except UnicodeDecodeError:
+                    data_str = data
+        data_str = data_str.encode("utf-8")
+    else:
+        data_str = data
+    return data_str
+
+
+def guess_csv_metadata(filecontent):
+    # we don't care about accuracy here, but we don't want to get
+    # a unicode error when converting to str
+    encoding = get_encoding(filecontent)
+    with io.StringIO(str(filecontent, encoding["encoding"])) as ff:
+        try:
+            dialect = csv.Sniffer().sniff(ff.readline(), "\t,;")
+            ff.seek(0)
+            meta = {"delimiter": dialect.delimiter, "quotechar": dialect.quotechar}
+        except BaseException:
+            meta = {}
+    return meta
+
+
+def read_path(path):
+    with open(path) as thefile:
+        return thefile.read()
+
+
+class CSVReader:
+    """Advanced CSV reader."""
+
+    def __init__(
+        self,
+        filepath=None,
+        filedata=None,
+        delimiter="|",
+        quotechar='"',
+        encoding=None,
+        fieldnames=None,
+        rows_from_to=None,
+    ):
+        assert filedata or filepath, "Provide a file path or some file data!"
+        if filepath:
+            filedata = read_path(filepath)
+        self.bdata = csv_content_to_file(filedata, encoding)
+        self.data = str(self.bdata, "utf-8")
+        self.delimiter = delimiter
+        self.quotechar = quotechar
+        self.encoding = encoding
+        self.fieldnames = fieldnames
+        self.rows_from_to = rows_from_to or ""
+
+    def read_lines(self):
+        """Yield lines, enriched with extra info (like the line number)."""
+        lines = self.data.splitlines()
+        if ":" in self.rows_from_to:
+            header = lines[0]
+            lines = lines[1:]
+            _from, _to = self.rows_from_to.split(":")
+            lines = [
+                header,
+            ] + lines[int(_from or 0) : int(_to or len(lines) + 1)]
+        reader = csv.DictReader(
+            lines,
+            delimiter=str(self.delimiter),
+            quotechar=str(self.quotechar),
+            fieldnames=self.fieldnames,
+        )
+        for line in reader:
+            line["_line_nr"] = reader.line_num
+            yield line
+
+
+def gen_chunks(iterable, chunksize=10):
+    """Chunk generator.
+
+    Take an iterable and yield `chunksize` sized slices.
+    """
+    chunk = []
+    for i, line in enumerate(iterable):
+        if i % chunksize == 0 and i > 0:
+            yield chunk
+            del chunk[:]
+        chunk.append(line)
+    yield chunk
diff --git a/connector_importer/utils/mapper_utils.py b/connector_importer/utils/mapper_utils.py
new file mode 100644
index 000000000..23880653f
--- /dev/null
+++ b/connector_importer/utils/mapper_utils.py
@@ -0,0 +1,354 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
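.. note:: ``CSVReader`` and ``gen_chunks`` above are the building blocks of
   the CSV source. Typical usage (editor's illustration; ``process`` is a
   hypothetical consumer)::

       reader = CSVReader(filedata=b"id|fullname\n1|John Doe", delimiter="|")
       for chunk in gen_chunks(reader.read_lines(), chunksize=10):
           process(chunk)  # each line dict carries a `_line_nr` key

   Mind that ``gen_chunks`` clears and reuses the same list between yields,
   so each chunk must be consumed (or copied) before advancing the generator.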
+
+from datetime import datetime
+
+import pytz
+
+from odoo import fields
+from odoo.tools.misc import str2bool
+
+from ..log import logger
+from ..utils.misc import sanitize_external_id
+
+FMTS = ("%d/%m/%Y",)
+
+FMTS_DT = ("%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S.000")
+
+
+def to_date(value, formats=FMTS):
+    """Convert date strings to odoo format."""
+    # pylint: disable=except-pass
+    for fmt in formats:
+        try:
+            value = datetime.strptime(value, fmt).date()
+            break
+        except ValueError:
+            pass
+    if not isinstance(value, str):
+        try:
+            return fields.Date.to_string(value)
+        except ValueError:
+            pass
+    # the value has not been converted,
+    # maybe because it is something like 00/00/0000
+    # or in another bad format
+    return None
+
+
+def to_utc_datetime(orig_value, tz="Europe/Rome", formats=FMTS_DT):
+    """Convert date strings to odoo format respecting TZ."""
+    # pylint: disable=except-pass
+    value = orig_value
+    local_tz = pytz.timezone(tz)
+    for fmt in formats:
+        try:
+            naive = datetime.strptime(orig_value, fmt)
+            local_dt = local_tz.localize(naive, is_dst=None)
+            value = local_dt.astimezone(pytz.utc)
+            break
+        except ValueError:
+            pass
+    if not isinstance(value, str):
+        return fields.Datetime.to_string(value)
+    # the value has not been converted,
+    # maybe because it is something like 00/00/0000
+    # or in another bad format
+    return None
+
+
+def to_safe_float(value):
+    """Safely convert to float."""
+    if isinstance(value, float):
+        return value
+    if not value:
+        return 0.0
+    try:
+        return float(value.replace(",", "."))
+    except ValueError:
+        return 0.0
+
+
+def to_safe_int(value):
+    """Safely convert to integer."""
+    if isinstance(value, int):
+        return value
+    if not value:
+        return 0
+    try:
+        return int(value.replace(",", "").replace(".", ""))
+    except ValueError:
+        return 0
+
+
+CONV_MAPPING = {
+    "date": to_date,
+    "utc_date": to_utc_datetime,
+    "safe_float": to_safe_float,
+    "safe_int": to_safe_int,
+    "bool": lambda x: str2bool(x, default=False),
+}
+
+
+def convert(field, conv_type, fallback_field=None, pre_value_handler=None, **kw):
+    """Convert the source field to a defined ``conv_type``
+    (ex. str) before returning it.
+    You can also use predefined converters like 'date'.
+    Use ``fallback_field`` to provide a field of the same type
+    to be used in case the base field has no value.
+    """
+
+    if conv_type in CONV_MAPPING:
+        conv_type = CONV_MAPPING[conv_type]
+
+    def modifier(self, record, to_attr):
+        if field not in record:
+            # be gentle
+            logger.warning("Field `%s` missing in line `%s`", field, record["_line_nr"])
+            return None
+        value = record.get(field)
+        if not value and fallback_field:
+            value = record[fallback_field]
+        if pre_value_handler:
+            value = pre_value_handler(value)
+        # do not use `if not value` otherwise you override all zero values
+        if value is None:
+            return None
+        return conv_type(value, **kw)
+
+    modifier._from_key = field
+    return modifier
+
+
+def from_mapping(field, mapping, default_value=None, **kw):
+    """Convert the source value using a ``mapping`` of values."""
+
+    def modifier(self, record, to_attr):
+        value = record.get(field)
+        return mapping.get(value, default_value)
+
+    modifier._from_key = field
+    return modifier
+
+
+def concat(field, separator=" ", handler=None, **kw):
+    """Concatenate values from different fields."""
+
+    # TODO: `field` is actually a list of fields.
+    # The `field` attribute is required ATM by the base connector mapper and
+    # `_direct_source_field_name` raises an error if you don't specify it.
+    # Check if we can get rid of it.
+
+    def modifier(self, record, to_attr):
+        value = [
+            record.get(_field, "") for _field in field if record.get(_field, "").strip()
+        ]
+        return separator.join(value)
+
+    modifier._from_key = field
+    return modifier
+
+
+def xmlid_to_rel(field, sanitize=True, sanitize_default_mod_name=None, **kw):
+    """Convert xmlids source values to ids."""
+    xmlid_to_rel._sanitize = sanitize
+    xmlid_to_rel._sanitize_default_mod_name = sanitize_default_mod_name
+
+    def _xid_to_record(env, xid):
+        xid = (
+            sanitize_external_id(
+                xid, default_mod_name=xmlid_to_rel._sanitize_default_mod_name
+            )
+            if xmlid_to_rel._sanitize
+            else xid
+        )
+        return env.ref(xid, raise_if_not_found=False)
+
+    def modifier(self, record, to_attr):
+        value = record.get(field)
+        if value is None:
+            return None
+        column = self.model._fields[to_attr]
+        if column.type.endswith("2many"):
+            _values = [x.strip() for x in value.split(",") if x.strip()]
+            values = []
+            rec_ids = []
+            for xid in _values:
+                rec = _xid_to_record(self.env, xid)
+                if rec:
+                    rec_ids.append(rec.id)
+            values.append((6, 0, rec_ids))
+            return values
+        elif column.type.endswith("many2one"):
+            # m2o
+            rec = _xid_to_record(self.env, value)
+            if rec:
+                return rec.id
+            return None
+        else:
+            raise ValueError("Destination is not a related field.")
+
+    modifier._from_key = field
+    return modifier
+
+
+# TODO: consider moving this to the mapper base class
+# to ease maintainability and overriding
+
+
+def backend_to_rel(  # noqa: C901
+    field,
+    search_field=None,
+    search_operator=None,
+    value_handler=None,
+    default_search_value=None,
+    default_search_field=None,
+    search_value_handler=None,
+    allowed_length=None,
+    create_missing=False,
+    create_missing_handler=None,
+    **kw,
+):
+    """A modifier intended to be used on the ``direct`` mappings.
+
+    Example::
+
+        direct = [(backend_to_rel('country',
+                    search_field='code',
+                    default_search_value='IT',
+                    allowed_length=2), 'country_id'),]
+
+    :param field: name of the source field in the record
+    :param search_field: name of the field to be used for searching
+    :param search_operator: operator to be used for searching
+    :param value_handler: a function to manipulate the raw value
+        before using it. You can use it to weed out values that should be
+        treated as empty, like '0' instead of an empty string.
+    :param default_search_value: if the value is empty you can provide
+        a default value to look up
+    :param default_search_field: if the value is empty you can provide
+        a different field to look up for the default value
+    :param search_value_handler: a callable to use
+        to manipulate the value before searching
+    :param allowed_length: enforce a check on the search_value length
+    :param create_missing: create a new record if not found
+    :param create_missing_handler: provide a handler
+        for getting new values for a new record to be created.
+    """
+
+    def modifier(self, record, to_attr):
+        search_value = _get_search_value(self, record, value_handler, field)
+        column, rel_model = _get_column_and_model(self, to_attr)
+
+        # handle defaults if no search value here
+        if not search_value:
+            search_value = _handle_default_search_value()
+
+        # Support Odoo studio fields dynamically.
+ # When a model is created automatically from Odoo studio + # it gets an `x_name` field which cannot be modified :/ + if ( + not default_search_field + and modifier.search_field not in rel_model._fields + and "x_name" in rel_model._fields + ): + modifier.search_field = "x_name" + if allowed_length and len(search_value) != allowed_length: + return None + # alter search value if handler is given + if search_value and search_value_handler: + search_value = search_value_handler(search_value) + if not search_value: + return None + search_operator = "=" + if column.type.endswith("2many"): + # we need multiple values + search_operator = "in" + if not isinstance(search_value, (list | tuple)): + search_value = [search_value] + if modifier.search_operator: + # override by param + search_operator = modifier.search_operator + + search_args = [(modifier.search_field, search_operator, search_value)] + value = rel_model.search(search_args) + + value = _handle_missing_values( + self, column, value, search_value, rel_model, record, to_attr + ) + + # handle the final value based on col type + return _handle_final_value(column, value) + + def _get_search_value(self, record, value_handler, field): + search_value = record.get(field) + if search_value and value_handler: + search_value = value_handler(self, record, search_value) + return search_value + + def _get_column_and_model(self, to_attr): + column = self.model._fields[to_attr] + rel_model = self.env[column.comodel_name].with_context(active_test=False) + return column, rel_model + + def _handle_default_search_value(): + if default_search_value: + search_value = default_search_value + if default_search_field: + modifier.search_field = default_search_field + return search_value + + def _handle_missing_values( + self, column, value, search_value, rel_model, record, to_attr + ): + if ( + column.type.endswith("2many") + and isinstance(search_value, (list | tuple)) + and not len(search_value) == len(value or []) + ): + # make sure we consider all the values and related records + # that we pass here. + # If one of them is missing we have to create them all before. + # If `create_missing_handler` is given, it must make sure + # to create all the missing records and return existing ones too. + # Typical use case is: product categories. + # If we pass ['Categ1', 'Categ2', 'Categ3'] we want them all, + # and if any of them is missing we might want to create them + # using a `create_missing_handler`. + value = None + + if not value and create_missing: + try: + if create_missing_handler: + value = create_missing_handler(self, rel_model, record) + else: + value = rel_model.create({"name": record[field]}) + except Exception as e: + msg = ( + "`backend_to_rel` failed creation. " + "[model: %s] [line: %s] [to_attr: %s] " + "Error: %s" + ) + logger.error(msg, rel_model._name, record["_line_nr"], to_attr, str(e)) + raise + return value + + def _handle_final_value(column, value): + if value: + if column.type == "many2one": + value = value[0].id + if column.type in ("one2many", "many2many"): + value = [(6, 0, [x.id for x in value])] + else: + return None + return value + + # use method attributes to not mess up the variables' scope. + # If we change the var inside modifier, without this trick + # you get UnboundLocalError, as the variable was never defined. 
+    # Trick thanks to http://stackoverflow.com/a/27910553/647924
+    modifier.search_field = search_field or "name"
+    modifier.search_operator = search_operator or None
+    modifier._from_key = field
+    return modifier
diff --git a/connector_importer/utils/misc.py b/connector_importer/utils/misc.py
new file mode 100644
index 000000000..448f95a7b
--- /dev/null
+++ b/connector_importer/utils/misc.py
@@ -0,0 +1,61 @@
+# Author: Simone Orsi
+# Copyright 2022 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+import base64
+import logging
+
+from odoo import _
+from odoo.exceptions import UserError
+from odoo.tools import DotDict
+
+_logger = logging.getLogger(__name__)
+
+
+def get_importer_for_config(backend, work_on_model, importer_config, **work_on_kw):
+    """Retrieve the importer component for given backend, model and configuration."""
+    # When using jobs, importer_config is loaded from the DB as a pure dict.
+    # Make sure we always have a dotted dict.
+    # FIXME: we should pass the import_type_id to the job and load it here.
+    importer_config = DotDict(importer_config)
+    work_on_kw.update(
+        {
+            "options": importer_config.options,
+        }
+    )
+    with backend.with_context(**importer_config.context).work_on(
+        importer_config.model, **work_on_kw
+    ) as work:
+        importer_name = importer_config.importer.name
+        return work.component_by_name(importer_name)
+
+
+def sanitize_external_id(external_id, default_mod_name=None):
+    """Ensure that the external ID has a dotted prefix."""
+    if not external_id:
+        return external_id
+    id_parts = external_id.split(".", 1)
+    if len(id_parts) == 2:
+        if "." in id_parts[1]:
+            raise UserError(
+                _(
+                    "The ID reference '%s' must contain at most one dot. "
+                    "Dots are used to refer to IDs of other modules, "
+                    "in the form: module.record_id"
+                )
+                % (external_id,)
+            )
+    else:
+        default_mod_name = default_mod_name or "__setup__"
+        return f"{default_mod_name}.{external_id}"
+    return external_id
+
+
+def to_b64(file_content):
+    """Safe conversion to b64"""
+    try:
+        # py < 3.9: `encodestring` is the legacy alias, removed in py 3.9
+        return base64.encodestring(file_content)
+    except AttributeError:
+        # py >= 3.9
+        return base64.b64encode(file_content)
diff --git a/connector_importer/utils/report_html.py b/connector_importer/utils/report_html.py
new file mode 100644
index 000000000..a7513e9ff
--- /dev/null
+++ b/connector_importer/utils/report_html.py
@@ -0,0 +1,128 @@
+# Author: Simone Orsi
+# Copyright 2018 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
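.. note:: ``sanitize_external_id`` above is what lets the mapper accept bare
   xids. Its behaviour in short (editor's illustration)::

       sanitize_external_id("record_1")  # -> "__setup__.record_1"
       sanitize_external_id("record_1", "my_module")  # -> "my_module.record_1"
       sanitize_external_id("base.main_company")  # unchanged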
+
+import json
+
+EXAMPLEDATA = {
+    "last_summary": {"updated": 0, "skipped": 584, "errors": 0, "created": 414},
+    "errors": [],
+    "last_start": "08/03/2018 13:46",
+    "skipped": [
+        {
+            "model": "product.template",
+            "line": 3,
+            "message": "ALREADY EXISTS code: 8482",
+            "odoo_record": 6171,
+        },
+        {
+            "model": "product.template",
+            "line": 4,
+            "message": "ALREADY EXISTS code: 8482",
+            "odoo_record": 6171,
+        },
+        {
+            "model": "product.template",
+            "line": 5,
+            "message": "ALREADY EXISTS code: 8482",
+            "odoo_record": 6171,
+        },
+    ],
+}
+JSONDATA = json.dumps(EXAMPLEDATA)
+
+
+def link_record(record_id, model="", record=None, name_field="name", target="_new"):
+    """Link an existing odoo record."""
+    name = "View"
+    if record:
+        default = getattr(record, "_rec_name", "Unknown")
+        name = getattr(record, name_field, default)
+        model = record._name
+    link = (
+        f'<a href="/web#id={record_id}&model={model}&view_type=form"'
+        f' target="{target}">{name}</a>'
+    )
+    return link
+
+
+class Reporter:
+    """Produce a formatted HTML report from importer json data."""
+
+    def __init__(self, jsondata, detailed=False, full_url=""):
+        self._jsondata = jsondata
+        self._data = json.loads(self._jsondata)
+        self._html = []
+        self._detailed = detailed
+        self._full_url = full_url
+
+    def html(self, wrapped=True):
+        """Return HTML report."""
+        self._produce()
+        content = "".join(self._html)
+        if wrapped:
+            return self._wrap("html", self._wrap("body", content))
+        return content
+
+    def _add(self, el):
+        self._html.append(el)
+
+    def _wrap(self, tag, content):
+        return f"<{tag}>{content}</{tag}>"
+
+    def _line(self, content):
+        return self._wrap("p", content)
+
+    def _value(self, key, value):
+        return self._wrap("strong", key.capitalize() + ": ") + str(value)
+
+    def _value_line(self, key, value):
+        return self._line(self._value(key, value))
+
+    def _line_to_msg(self, line):
+        res = []
+        if line.get("line"):
+            res.append("CSV line: {}, ".format(line["line"]))
+        if line.get("message"):
+            res.append(line["message"])
+        if "odoo_record" in line and "model" in line:
+            res.append(link_record(line["odoo_record"], model=line["model"]))
+        return " ".join(res)
+
+    def _listing(self, lines, list_type="ol"):
+        _lines = []
+        for line in lines:
+            _lines.append(self._wrap("li", self._line_to_msg(line)))
+        return self._wrap(list_type, "".join(_lines))
+
+    def _produce(self):
+        if not self._data.get("last_summary"):
+            return
+        # header
+        self._add(self._wrap("h2", "Last summary"))
+        # start date
+        self._add(self._value_line("Last start", self._data["last_start"]))
+        # global counters
+        summary_items = list(self._data["last_summary"].items())
+        for key, value in summary_items:
+            last = key == summary_items[-1][0]
+            self._add(self._value(key, value) + (" - " if not last else ""))
+        if self._detailed:
+            self._add(self._wrap("h3", "Details"))
+            if self._data["skipped"]:
+                self._add(self._wrap("h4", "Skipped"))
+                # skip messages
+                self._add(self._listing(self._data["skipped"]))
+            if self._data["errors"]:
+                self._add(self._wrap("h4", "Errors"))
+                # error messages
+                self._add(self._listing(self._data["errors"]))
+        if self._full_url:
+            link = f'<a href="{self._full_url}" target="_new">View full report</a>'
+            self._add(self._line(link))
+
+
+if __name__ == "__main__":
+    reporter = Reporter(JSONDATA, detailed=True)
+    # pylint: disable=print-used
+    print(reporter.html())
diff --git a/connector_importer/views/backend_views.xml b/connector_importer/views/backend_views.xml
new file mode 100644
index 000000000..7503240a1
--- /dev/null
+++ b/connector_importer/views/backend_views.xml
@@ -0,0 +1,114 @@
+<!-- View definitions for the `import.backend` model (the original XML markup
+     was lost in extraction; only the model name and the form's "Import"
+     button label survive). -->