From 8d162097a68de924d19e5b4f532bbc4b32766b56 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 17 Jan 2025 17:01:59 +0400
Subject: [PATCH 1/4] Update protobuf requirement from <4.0.0,>=3.18.1 to
 >=3.18.1,<6.0.0 in /tests (#28405)

Updates the requirements on [protobuf](https://github.com/protocolbuffers/protobuf) to permit the latest version.
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
Dependabot commands and options

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
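As context (not part of the original PR text): the file changed below, `tests/constraints.txt`, is a pip constraints file, which caps the versions of packages installed from a requirements file. A minimal sketch of how such a file is typically applied — the `tests/requirements.txt` path here is an assumed example:

    # Install test dependencies while honoring the pinned ranges,
    # e.g. protobuf>=3.18.1,<6.0.0 after this update.
    pip install -r tests/requirements.txt -c tests/constraints.txt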
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Ilya Lavrenov
---
 tests/constraints.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/constraints.txt b/tests/constraints.txt
index eea8f81e6571c7..30ba701095ecf4 100644
--- a/tests/constraints.txt
+++ b/tests/constraints.txt
@@ -14,7 +14,7 @@ tensorflow>=2.5,<2.19.0
 requests>=2.25.1
 opencv-python>=4.5
 paddlepaddle==2.6.1
-protobuf>=3.18.1,<4.0.0
+protobuf>=3.18.1,<6.0.0
 py>=1.9.0
 pytest>=5.0,<8.4
 pytest-dependency==0.5.1

From 58766e7c7606b38767710429daf8fb11e147e55b Mon Sep 17 00:00:00 2001
From: Alexandra Sidorova
Date: Fri, 17 Jan 2025 18:04:03 +0400
Subject: [PATCH 2/4] [Snippets] Implemented SetDynamicWAToOuterMostLoop pass (#28505)

### Details:
- *Dynamic MHA Subgraphs may have only a dynamic batch dimension. In that case the
  `MHAParallelWAOptimizer` pass cannot be applied to the subgraph to increase the parallel
  work amount, because the outermost Loop by M in the MHA has a static work amount, and the
  Subgraph may be executed inefficiently. This PR implements the `SetDynamicWAToOuterMostLoop`
  pass, which sets a dynamic work amount to the outermost Loop in dynamic MHA, making
  `MHAParallelWAOptimizer` applicable at runtime.*

### Tickets:
- *160647*

---
 .../pass/mha_parallel_wa_optimizer.hpp        |  9 ++-
 .../pass/set_dynamic_wa_to_outermost_loop.hpp | 30 ++++++++
 .../pass/mha_parallel_wa_optimizer.cpp        |  8 +-
 .../pass/set_dynamic_wa_to_outermost_loop.cpp | 73 +++++++++++++++++++
 src/common/snippets/src/op/subgraph.cpp       |  2 +
 .../snippets/mha_wo_transpose.cpp             |  5 ++
 6 files changed, 123 insertions(+), 4 deletions(-)
 create mode 100644 src/common/snippets/include/snippets/lowered/pass/set_dynamic_wa_to_outermost_loop.hpp
 create mode 100644 src/common/snippets/src/lowered/pass/set_dynamic_wa_to_outermost_loop.cpp

diff --git a/src/common/snippets/include/snippets/lowered/pass/mha_parallel_wa_optimizer.hpp b/src/common/snippets/include/snippets/lowered/pass/mha_parallel_wa_optimizer.hpp
index 2f42a523ec4eac..7a49f5942f1db2 100644
--- a/src/common/snippets/include/snippets/lowered/pass/mha_parallel_wa_optimizer.hpp
+++ b/src/common/snippets/include/snippets/lowered/pass/mha_parallel_wa_optimizer.hpp
@@ -12,6 +12,8 @@ namespace ov {
 namespace snippets {
 namespace lowered {
 namespace pass {
+
+class SetDynamicWAToOuterMostLoop;
 /**
  * @class MHAParallelWAOptimizer
  * @brief Optimizes the dynamic MHA execution increasing parallel work amount by dividing Brgemm's "M" dimension to "parallel_m"
@@ -22,6 +24,7 @@ namespace pass {
  * - Determines loops that should be adjusted.
 */
 class MHAParallelWAOptimizer : public lowered::pass::RuntimeOptimizer {
+    friend class SetDynamicWAToOuterMostLoop;
 public:
     OPENVINO_RTTI("MHAParallelWAOptimizer", "", RuntimeOptimizer)
     MHAParallelWAOptimizer() = default;
@@ -31,10 +34,14 @@ class MHAParallelWAOptimizer : public lowered::pass::RuntimeOptimizer {
     bool applicable() const override { return !m_loops_to_split.empty(); }

 private:
-    static std::unordered_set find_applicable_brgemms(const lowered::LinearIRCPtr& linear_ir);
+    static std::unordered_set find_applicable_brgemms(
+        const lowered::LinearIRCPtr& linear_ir,
+        bool check_dynamic_wa = true);
+
     static std::unordered_set find_unsqueezed_params(
         const lowered::LinearIRCPtr& linear_ir,
         const std::unordered_set& brgemms);
+
     static std::vector find_loops_to_split(
         const lowered::LinearIRCPtr& linear_ir,
         const std::unordered_set& unsqueezed_params);
diff --git a/src/common/snippets/include/snippets/lowered/pass/set_dynamic_wa_to_outermost_loop.hpp b/src/common/snippets/include/snippets/lowered/pass/set_dynamic_wa_to_outermost_loop.hpp
new file mode 100644
index 00000000000000..6daeb97ec8c566
--- /dev/null
+++ b/src/common/snippets/include/snippets/lowered/pass/set_dynamic_wa_to_outermost_loop.hpp
@@ -0,0 +1,30 @@
+// Copyright (C) 2023-2025 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "pass.hpp"
+
+namespace ov {
+namespace snippets {
+namespace lowered {
+namespace pass {
+
+/**
+ * @interface SetDynamicWAToOuterMostLoop
+ * @brief The pass sets a dynamic work amount to the outermost Loop by M in dynamic MHA Subgraphs
+ *        to allow MHAParallelWAOptimizer to optimize the parallel work amount at runtime.
+ * @ingroup snippets
+ */
+class SetDynamicWAToOuterMostLoop : public Pass {
+public:
+    OPENVINO_RTTI("SetDynamicWAToOuterMostLoop", "", Pass);
+    SetDynamicWAToOuterMostLoop() = default;
+    bool run(LinearIR& linear_ir) override;
+};
+
+} // namespace pass
+} // namespace lowered
+} // namespace snippets
+} // namespace ov
diff --git a/src/common/snippets/src/lowered/pass/mha_parallel_wa_optimizer.cpp b/src/common/snippets/src/lowered/pass/mha_parallel_wa_optimizer.cpp
index c75d1e86abbfa5..bb01346f4eff7d 100644
--- a/src/common/snippets/src/lowered/pass/mha_parallel_wa_optimizer.cpp
+++ b/src/common/snippets/src/lowered/pass/mha_parallel_wa_optimizer.cpp
@@ -85,7 +85,9 @@ bool MHAParallelWAOptimizer::run(const lowered::LinearIR& linear_ir) {
     return true;
 }

-std::unordered_set MHAParallelWAOptimizer::find_applicable_brgemms(const lowered::LinearIRCPtr& linear_ir) {
+std::unordered_set MHAParallelWAOptimizer::find_applicable_brgemms(
+    const lowered::LinearIRCPtr& linear_ir,
+    bool check_dynamic_wa) {
     auto is_brgemm = [](const lowered::ExpressionPtr& expr) {
         return ov::is_type(expr->get_node());
     };
@@ -96,12 +98,12 @@ std::unordered_set MHAParallelWAOptimizer::find_applicab
         brgemm_it = std::find_if(std::next(brgemm_it), linear_ir->end(), is_brgemm);
     }
     const auto& loop_manager = linear_ir->get_loop_manager();
-    auto applicable_brgemm = [&loop_manager](const lowered::ExpressionPtr& expr) {
+    auto applicable_brgemm = [&loop_manager, check_dynamic_wa](const lowered::ExpressionPtr& expr) {
         const auto& loop_idces = expr->get_loop_ids();
         if (loop_idces.empty())
             return false;
         const auto& outermost_loop = loop_manager->get_loop_info(loop_idces[0]);
-        if (!snippets::utils::is_dynamic_value(outermost_loop->get_work_amount()))
+        if (check_dynamic_wa && !snippets::utils::is_dynamic_value(outermost_loop->get_work_amount()))
             return false;
         bool loop_by_m = true;
         outermost_loop->iterate_through_ports([&loop_by_m](const lowered::LoopPort& port) {
diff --git a/src/common/snippets/src/lowered/pass/set_dynamic_wa_to_outermost_loop.cpp b/src/common/snippets/src/lowered/pass/set_dynamic_wa_to_outermost_loop.cpp
new file mode 100644
index 00000000000000..8a5db80f577aee
--- /dev/null
+++ b/src/common/snippets/src/lowered/pass/set_dynamic_wa_to_outermost_loop.cpp
@@ -0,0 +1,73 @@
+// Copyright (C) 2023-2025 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "snippets/lowered/pass/set_dynamic_wa_to_outermost_loop.hpp"
+
+#include "snippets/lowered/pass/mha_parallel_wa_optimizer.hpp"
+#include "snippets/itt.hpp"
+#include "snippets/lowered/linear_ir.hpp"
+#include "snippets/lowered/loop_manager.hpp"
+#include "snippets/op/brgemm.hpp"
+#include "snippets/utils/loop_utils.hpp"
+
+namespace ov {
+namespace snippets {
+namespace lowered {
+namespace pass {
+
+bool SetDynamicWAToOuterMostLoop::run(LinearIR& linear_ir) {
+    OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::SetDynamicWAToOuterMostLoop")
+    if (linear_ir.empty() || !linear_ir.is_dynamic() || linear_ir.get_config().m_enable_domain_optimization)
+        return false;
+
+    const auto linear_ir_ptr = std::make_shared(linear_ir);
+    const auto brgemms = MHAParallelWAOptimizer::find_applicable_brgemms(linear_ir_ptr, false);
+    if (brgemms.empty())
+        return false;
+
+    const auto unsqueezed_params = MHAParallelWAOptimizer::find_unsqueezed_params(linear_ir_ptr, brgemms);
+    OPENVINO_ASSERT(!unsqueezed_params.empty(), "unsqueezed_params mustn't be empty after initialization");
+
+    const auto& loop_manager = linear_ir_ptr->get_loop_manager();
+    std::unordered_set affected_loops;
+    size_t prev_loop_id = std::numeric_limits::max();
+    static const size_t dim_M_idx = 1;
+
+    auto add_affected_loop = [&](const lowered::ExpressionPtr& expr) {
+        const auto& loop_idces = expr->get_loop_ids();
+        if (loop_idces.empty() || loop_idces.front() == prev_loop_id)
+            return;
+
+        prev_loop_id = loop_idces.front();
+        const auto loop_info = loop_manager->get_loop_info(prev_loop_id);
+        if (loop_info->get_dim_idx() == dim_M_idx) {
+            affected_loops.insert(loop_info);
+        }
+    };
+
+    size_t i = 0;
+    std::unordered_set visited;
+    for (const auto& param : linear_ir_ptr->get_parameters()) {
+        if (unsqueezed_params.count(i++))
+            continue;
+        utils::visit_path(param, visited, add_affected_loop, false);
+    }
+
+    bool modified = false;
+    for (const auto& loop : affected_loops) {
+        if (!utils::is_dynamic_value(loop->get_work_amount())) {
+            loop->set_work_amount(utils::get_dynamic_value());
+            ov::snippets::utils::update_data_pointer_shifts(loop);
+            modified = true;
+        }
+    }
+
+    return modified;
+}
+
+} // namespace pass
+} // namespace lowered
+} // namespace snippets
+} // namespace ov
\ No newline at end of file
diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp
index ecfa72bcb20919..42820889e2f63f 100644
--- a/src/common/snippets/src/op/subgraph.cpp
+++ b/src/common/snippets/src/op/subgraph.cpp
@@ -54,6 +54,7 @@
 #include "snippets/lowered/pass/validate_expanded_loops.hpp"
 #include "snippets/lowered/pass/set_load_store_scalar.hpp"
 #include "snippets/lowered/pass/extract_loop_invariants.hpp"
+#include "snippets/lowered/pass/set_dynamic_wa_to_outermost_loop.hpp"

 #include "snippets/lowered/pass/init_registers.hpp"

@@ -467,6 +468,7 @@ void Subgraph::control_flow_transformations(size_t min_parallel_work_amount, siz
     pipeline.register_pass();
     pipeline.register_pass();
     pipeline.register_pass();
+    pipeline.register_pass();
     pipeline.register_pass();
     pipeline.register_pass(m_linear_ir->get_config().m_are_buffers_optimized);
     pipeline.register_pass();
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha_wo_transpose.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha_wo_transpose.cpp
index 0967ef27087674..c6b11f48efa24c 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha_wo_transpose.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha_wo_transpose.cpp
@@ -44,6 +44,11 @@ std::vector> originalShape_3D {
         {PartialShape{2, -1, 64}, {{2, 9, 64}, {2, 4, 64}, {2, 9, 64}}},
         {PartialShape{2, 64, -1}, {{2, 64, 9}, {2, 64, 4}, {2, 64, 9}}},
         {PartialShape{2, -1, 64}, {{2, 9, 64}, {2, 4, 64}, {2, 9, 64}}},
+    },
+    {
+        {PartialShape{-1, 128, 64}, {{1, 128, 64}, {2, 128, 64}, {1, 128, 64}}},
+        {PartialShape{-1, 64, 128}, {{1, 64, 128}, {2, 64, 128}, {1, 64, 128}}},
+        {PartialShape{-1, 128, 64}, {{1, 128, 64}, {2, 128, 64}, {1, 128, 64}}},
     }
 };

From 049c8ba0b22e147172ec80b63122e3fd29ef02a5 Mon Sep 17 00:00:00 2001
From: Karol Blaszczak
Date: Fri, 17 Jan 2025 15:08:03 +0100
Subject: [PATCH 3/4] [DOCS] realign menu pass 1 (#28486)

---
 README.md                                     |   2 +-
 .../about-openvino/key-features.rst           |   2 +-
 .../documentation/openvino-ecosystem.rst      |   4 +-
 .../configurations/genai-dependencies.rst     |   2 +-
 .../get-started/install-openvino.rst          |   4 +-
 .../install-openvino-genai.rst                |   4 +-
 docs/articles_en/learn-openvino.rst           |   4 -
 ...e.rst => openvino-workflow-generative.rst} |  16 +-
 .../genai-model-preparation.rst               |   0
 .../inference-with-genai-on-npu.rst}          |   0
 .../inference-with-genai.rst}                 |   4 +-
 .../inference-with-optimum-intel.rst}         |   0
 .../ov-tokenizers.rst                         |   2 +-
 .../weight-compression.rst                    |   6 +-
 .../running-inference/stateful-models.rst     |   2 +-
 .../obtaining-stateful-openvino-model.rst     |   2 +-
 ...lm-agent-functioncall-qwen-with-output.rst |   2 +-
 .../llm-agent-react-langchain-with-output.rst | 162 +++++++++---------
 .../notebooks/llm-agent-react-with-output.rst |  84 ++++-----
 .../llm-chatbot-generate-api-with-output.rst  |  72 ++++----
 docs/notebooks/llm-chatbot-with-output.rst    |   6 +-
 ...multilora-image-generation-with-output.rst |  58 +++----
 .../speculative-sampling-with-output.rst      |   2 +-
 .../text-to-image-genai-with-output.rst       |  58 +++----
 .../selector-tool/assets/selector-DiE3WrtX.js |   2 +-
 docs/sphinx_setup/index.rst                   |   5 +-
 src/frontends/tensorflow/src/frontend.cpp    |   2 +-
 27 files changed, 252 insertions(+), 255 deletions(-)
 rename docs/articles_en/{learn-openvino/llm_inference_guide.rst => openvino-workflow-generative.rst} (86%)
 rename docs/articles_en/{learn-openvino/llm_inference_guide => openvino-workflow-generative}/genai-model-preparation.rst (100%)
 rename docs/articles_en/{learn-openvino/llm_inference_guide/genai-guide-npu.rst => openvino-workflow-generative/inference-with-genai-on-npu.rst} (100%)
 rename docs/articles_en/{learn-openvino/llm_inference_guide/genai-guide.rst => openvino-workflow-generative/inference-with-genai.rst} (99%)
 rename docs/articles_en/{learn-openvino/llm_inference_guide/llm-inference-hf.rst => openvino-workflow-generative/inference-with-optimum-intel.rst} (100%)
 rename docs/articles_en/{learn-openvino/llm_inference_guide => openvino-workflow-generative}/ov-tokenizers.rst (99%)

diff --git a/README.md b/README.md
index 8019bb892023f2..9ed2d4690e39e9 100644
--- a/README.md
+++ b/README.md
@@ -100,7 +100,7 @@ OpenVINO supports the CPU, GPU,
and NPU [devices](https://docs.openvino.ai/2024/ ## Generative AI with OpenVINO -Get started with the OpenVINO GenAI [installation](https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html) and refer to the [detailed guide](https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide/genai-guide.html) to explore the capabilities of Generative AI using OpenVINO. +Get started with the OpenVINO GenAI [installation](https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html) and refer to the [detailed guide](https://docs.openvino.ai/2024/openvino-workflow-generative/generative-inference.html) to explore the capabilities of Generative AI using OpenVINO. Learn how to run LLMs and GenAI with [Samples](https://github.com/openvinotoolkit/openvino.genai/tree/master/samples) in the [OpenVINO™ GenAI repo](https://github.com/openvinotoolkit/openvino.genai). See GenAI in action with Jupyter notebooks: [LLM-powered Chatbot](https://github.com/openvinotoolkit/openvino_notebooks/blob/latest/notebooks/llm-chatbot/README.md) and [LLM Instruction-following pipeline](https://github.com/openvinotoolkit/openvino_notebooks/blob/latest/notebooks/llm-question-answering/README.md). diff --git a/docs/articles_en/about-openvino/key-features.rst b/docs/articles_en/about-openvino/key-features.rst index 6514bdc67a3302..c751a5bc65d3cf 100644 --- a/docs/articles_en/about-openvino/key-features.rst +++ b/docs/articles_en/about-openvino/key-features.rst @@ -13,7 +13,7 @@ Easy Integration :doc:`torch.compile <../openvino-workflow/torch-compile>` to improve model inference. Apply OpenVINO optimizations to your PyTorch models directly with a single line of code. -| :doc:`GenAI Out Of The Box <../learn-openvino/llm_inference_guide/genai-guide>` +| :doc:`GenAI Out Of The Box <../openvino-workflow-generative/inference-with-genai>` | With the genAI flavor of OpenVINO, you can run generative AI with just a couple lines of code. Check out the GenAI guide for instructions on how to do it. diff --git a/docs/articles_en/documentation/openvino-ecosystem.rst b/docs/articles_en/documentation/openvino-ecosystem.rst index 1975fe0a48a181..cb62672c032412 100644 --- a/docs/articles_en/documentation/openvino-ecosystem.rst +++ b/docs/articles_en/documentation/openvino-ecosystem.rst @@ -24,7 +24,7 @@ you an overview of a whole ecosystem of tools and solutions under the OpenVINO u | **GenAI** | :bdg-link-dark:`Github ` - :bdg-link-success:`User Guide ` + :bdg-link-success:`User Guide ` OpenVINO™ GenAI Library aims to simplify running inference of generative AI models. Check the LLM-powered Chatbot Jupyter notebook to see how GenAI works. @@ -113,7 +113,7 @@ generative AI and vision models directly on your computer or edge device using O | **Tokenizers** | :bdg-link-dark:`Github ` - :bdg-link-success:`User Guide ` + :bdg-link-success:`User Guide ` OpenVINO Tokenizers add text processing operations to OpenVINO. 
diff --git a/docs/articles_en/get-started/configurations/genai-dependencies.rst b/docs/articles_en/get-started/configurations/genai-dependencies.rst index 4486890c3a40b8..6eec18a74f0f05 100644 --- a/docs/articles_en/get-started/configurations/genai-dependencies.rst +++ b/docs/articles_en/get-started/configurations/genai-dependencies.rst @@ -27,5 +27,5 @@ Additional Resources * :doc:`OpenVINO GenAI Installation Guide <../install-openvino/install-openvino-genai>` * `OpenVINO GenAI repository `__ * :doc:`OpenVINO Installation Guide <../install-openvino>` -* :doc:`OpenVINO Tokenizers <../../learn-openvino/llm_inference_guide/ov-tokenizers>` +* :doc:`OpenVINO Tokenizers <../../openvino-workflow-generative/ov-tokenizers>` diff --git a/docs/articles_en/get-started/install-openvino.rst b/docs/articles_en/get-started/install-openvino.rst index 7603adf37b7e89..401aa79213e6d7 100644 --- a/docs/articles_en/get-started/install-openvino.rst +++ b/docs/articles_en/get-started/install-openvino.rst @@ -35,8 +35,8 @@ All currently supported versions are: A new OpenVINO GenAI Flavor streamlines application development by providing LLM-specific interfaces for easy integration of language models, handling tokenization and text generation. For installation and usage instructions, proceed to - :doc:`Install OpenVINO GenAI Flavor <../learn-openvino/llm_inference_guide/genai-guide>` and - :doc:`Run LLMs with OpenVINO GenAI Flavor <../learn-openvino/llm_inference_guide/genai-guide>`. + :doc:`Install OpenVINO GenAI Flavor <../openvino-workflow-generative>` and + :doc:`Run LLMs with OpenVINO GenAI Flavor <../openvino-workflow-generative/inference-with-genai>`. .. dropdown:: Building OpenVINO from Source diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst b/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst index bbfaa7817017ef..b548353b36977e 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst @@ -5,7 +5,7 @@ OpenVINO GenAI is a new flavor of OpenVINO, aiming to simplify running inference It hides the complexity of the generation process and minimizes the amount of code required. You can now provide a model and input context directly to OpenVINO, which performs tokenization of the input text, executes the generation loop on the selected device, and returns the generated text. -For a quickstart guide, refer to the :doc:`GenAI API Guide <../../learn-openvino/llm_inference_guide/genai-guide>`. +For a quickstart guide, refer to the :doc:`GenAI API Guide <../../openvino-workflow-generative/inference-with-genai>`. To see GenAI in action, check the Jupyter notebooks: `LLM-powered Chatbot `__ and @@ -28,7 +28,7 @@ but use the *openvino-genai* package instead of *openvino*: Archive Installation ############################### -The OpenVINO GenAI archive package includes the OpenVINO™ Runtime and :doc:`Tokenizers <../../learn-openvino/llm_inference_guide/ov-tokenizers>`. +The OpenVINO GenAI archive package includes the OpenVINO™ Runtime and :doc:`Tokenizers <../../openvino-workflow-generative/ov-tokenizers>`. 
To install the GenAI flavor of OpenVINO from an archive file, follow the standard installation steps for your system but instead of using the vanilla package file, download the one with OpenVINO GenAI: diff --git a/docs/articles_en/learn-openvino.rst b/docs/articles_en/learn-openvino.rst index 98797c9c67c126..762e51985159d3 100644 --- a/docs/articles_en/learn-openvino.rst +++ b/docs/articles_en/learn-openvino.rst @@ -14,7 +14,6 @@ Learn OpenVINO Interactive Tutorials (Python) Sample Applications (Python & C++) - Generative AI workflow @@ -28,6 +27,3 @@ as well as an experienced user. | :doc:`OpenVINO Samples ` | The OpenVINO samples (Python and C++) are simple console applications that show how to use specific OpenVINO API features. They can assist you in executing tasks such as loading a model, running inference, querying particular device capabilities, etc. - -| :doc:`Generative AI workflow ` -| Detailed information on how OpenVINO accelerates Generative AI use cases and what models it supports. This tutorial provides instructions for running Generative AI models using Hugging Face Optimum Intel and Native OpenVINO APIs. diff --git a/docs/articles_en/learn-openvino/llm_inference_guide.rst b/docs/articles_en/openvino-workflow-generative.rst similarity index 86% rename from docs/articles_en/learn-openvino/llm_inference_guide.rst rename to docs/articles_en/openvino-workflow-generative.rst index 8401923b8c7ac6..a4fa53335988ae 100644 --- a/docs/articles_en/learn-openvino/llm_inference_guide.rst +++ b/docs/articles_en/openvino-workflow-generative.rst @@ -9,10 +9,10 @@ Generative AI workflow :maxdepth: 1 :hidden: - Generative Model Preparation - Inference with OpenVINO GenAI - Inference with Optimum Intel - OpenVINO Tokenizers + Generative Model Preparation + Inference with OpenVINO GenAI + Inference with Optimum Intel + OpenVINO Tokenizers @@ -58,7 +58,7 @@ options: Note that the base version of OpenVINO may also be used to run generative AI. Although it may offer a simpler environment, with fewer dependencies, it has significant limitations and a more demanding implementation process. For reference, see -`the article on generative AI usage of OpenVINO 2024.6 `__. +`the article on generative AI usage of OpenVINO 2024.6 `__. 
The advantages of using OpenVINO for generative model deployment: @@ -90,8 +90,8 @@ The advantages of using OpenVINO for generative model deployment: Proceed to guides on: -* :doc:`OpenVINO GenAI Flavor <./llm_inference_guide/genai-guide>` -* :doc:`Hugging Face and Optimum Intel <./llm_inference_guide/llm-inference-hf>` -* `Generative AI with Base OpenVINO `__ +* :doc:`OpenVINO GenAI Flavor <./openvino-workflow-generative/inference-with-genai>` +* :doc:`Hugging Face and Optimum Intel <./openvino-workflow-generative/inference-with-optimum-intel>` +* `Generative AI with Base OpenVINO `__ diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/genai-model-preparation.rst b/docs/articles_en/openvino-workflow-generative/genai-model-preparation.rst similarity index 100% rename from docs/articles_en/learn-openvino/llm_inference_guide/genai-model-preparation.rst rename to docs/articles_en/openvino-workflow-generative/genai-model-preparation.rst diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/genai-guide-npu.rst b/docs/articles_en/openvino-workflow-generative/inference-with-genai-on-npu.rst similarity index 100% rename from docs/articles_en/learn-openvino/llm_inference_guide/genai-guide-npu.rst rename to docs/articles_en/openvino-workflow-generative/inference-with-genai-on-npu.rst diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/genai-guide.rst b/docs/articles_en/openvino-workflow-generative/inference-with-genai.rst similarity index 99% rename from docs/articles_en/learn-openvino/llm_inference_guide/genai-guide.rst rename to docs/articles_en/openvino-workflow-generative/inference-with-genai.rst index 43f9435bf79b1b..1f19c3eed7da8f 100644 --- a/docs/articles_en/learn-openvino/llm_inference_guide/genai-guide.rst +++ b/docs/articles_en/openvino-workflow-generative/inference-with-genai.rst @@ -8,7 +8,7 @@ Inference with OpenVINO GenAI :maxdepth: 1 :hidden: - NPU inference of LLMs + NPU inference of LLMs OpenVINO™ GenAI is a library of pipelines and methods, extending the OpenVINO runtime to work @@ -16,7 +16,7 @@ with generative AI models more efficiently. This article provides reference code on its usage. Note that the base OpenVINO version will not work with these instructions, make sure to :doc:`install OpenVINO with GenAI <../../get-started/install-openvino/install-openvino-genai>`. -.. image:: ../../assets/images/genai_main_diagram.svg +.. image:: ../assets/images/genai_main_diagram.svg :align: center :alt: OpenVINO GenAI workflow diagram diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/llm-inference-hf.rst b/docs/articles_en/openvino-workflow-generative/inference-with-optimum-intel.rst similarity index 100% rename from docs/articles_en/learn-openvino/llm_inference_guide/llm-inference-hf.rst rename to docs/articles_en/openvino-workflow-generative/inference-with-optimum-intel.rst diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/ov-tokenizers.rst b/docs/articles_en/openvino-workflow-generative/ov-tokenizers.rst similarity index 99% rename from docs/articles_en/learn-openvino/llm_inference_guide/ov-tokenizers.rst rename to docs/articles_en/openvino-workflow-generative/ov-tokenizers.rst index 1dbd85e3ee59a5..c836eb52e99495 100644 --- a/docs/articles_en/learn-openvino/llm_inference_guide/ov-tokenizers.rst +++ b/docs/articles_en/openvino-workflow-generative/ov-tokenizers.rst @@ -6,7 +6,7 @@ generation with LLMs. 
Tokenizers convert the input text into a sequence of token corresponding IDs, so that the model can understand and process it during inference. The transformation of a sequence of numbers into a string is called detokenization. -.. image:: ../../assets/images/tokenization.svg +.. image:: ../assets/images/tokenization.svg :align: center There are two important points in the tokenizer-model relation: diff --git a/docs/articles_en/openvino-workflow/model-optimization-guide/weight-compression.rst b/docs/articles_en/openvino-workflow/model-optimization-guide/weight-compression.rst index 4b752b74187768..232e0f2c2a66b9 100644 --- a/docs/articles_en/openvino-workflow/model-optimization-guide/weight-compression.rst +++ b/docs/articles_en/openvino-workflow/model-optimization-guide/weight-compression.rst @@ -105,7 +105,7 @@ By default, weights are compressed asymmetrically to "INT8_ASYM" mode. print(results) For more details, refer to the article on how to - :doc:`infer LLMs using Optimum Intel <../../learn-openvino/llm_inference_guide/llm-inference-hf>`. + :doc:`infer LLMs using Optimum Intel <../../openvino-workflow-generative/inference-with-optimum-intel>`. .. tab-item:: Compression with NNCF :sync: nncf @@ -221,7 +221,7 @@ depending on the model. For more details, refer to the article on how to - :doc:`infer LLMs using Optimum Intel <../../../learn-openvino/llm_inference_guide/llm-inference-hf>`. + :doc:`infer LLMs using Optimum Intel <../../../openvino-workflow-generative/inference-with-optimum-intel>`. The code snippet below shows how to do 4-bit quantization of the model weights represented in OpenVINO IR using NNCF: @@ -344,7 +344,7 @@ load the compressed model later for faster time to first inference. .. tip:: Models optimized with with NNCF or Optimum Intel can be used with - :doc:`OpenVINO GenAI <../../learn-openvino/llm_inference_guide/genai-guide>`. + :doc:`OpenVINO GenAI <../../openvino-workflow-generative/inference-with-genai>`. Auto-tuning of Weight Compression Parameters diff --git a/docs/articles_en/openvino-workflow/running-inference/stateful-models.rst b/docs/articles_en/openvino-workflow/running-inference/stateful-models.rst index d00fd19c4d636d..55626d485c412d 100644 --- a/docs/articles_en/openvino-workflow/running-inference/stateful-models.rst +++ b/docs/articles_en/openvino-workflow/running-inference/stateful-models.rst @@ -66,7 +66,7 @@ from the application code to OpenVINO and all related internal work is hidden fr There are three methods of turning an OpenVINO model into a stateful one: -* :doc:`Optimum-Intel <../../learn-openvino/llm_inference_guide/llm-inference-hf>` - the most user-friendly option. All necessary optimizations +* :doc:`Optimum-Intel <../../openvino-workflow-generative/inference-with-optimum-intel>` - the most user-friendly option. All necessary optimizations are recognized and applied automatically. The drawback is, the tool does not work with all models. 
diff --git a/docs/articles_en/openvino-workflow/running-inference/stateful-models/obtaining-stateful-openvino-model.rst b/docs/articles_en/openvino-workflow/running-inference/stateful-models/obtaining-stateful-openvino-model.rst index 12a21a5dd1fad0..0ad6530cb61188 100644 --- a/docs/articles_en/openvino-workflow/running-inference/stateful-models/obtaining-stateful-openvino-model.rst +++ b/docs/articles_en/openvino-workflow/running-inference/stateful-models/obtaining-stateful-openvino-model.rst @@ -10,7 +10,7 @@ and you have three ways to do it: * `Optimum-Intel `__ - an automated solution applicable to a selection of models (not covered by this article, for a usage guide - refer to the :doc:`LLM Inference with Hugging Face and Optimum Intel <../../../learn-openvino/llm_inference_guide>` article). + refer to the :doc:`LLM Inference with Hugging Face and Optimum Intel <../../../openvino-workflow-generative>` article). * :ref:`MakeStateful transformation ` - to choose which pairs of Parameter and Result to replace. * :ref:`LowLatency2 transformation ` - to detect and replace Parameter diff --git a/docs/notebooks/llm-agent-functioncall-qwen-with-output.rst b/docs/notebooks/llm-agent-functioncall-qwen-with-output.rst index 07f84987dca33e..051e83eff184bb 100644 --- a/docs/notebooks/llm-agent-functioncall-qwen-with-output.rst +++ b/docs/notebooks/llm-agent-functioncall-qwen-with-output.rst @@ -258,7 +258,7 @@ pipeline. You can get additional inference speed improvement with `Dynamic Quantization of activations and KV-cache quantization on -CPU `__. +CPU `__. These options can be enabled with ``ov_config`` as follows: .. code:: ipython3 diff --git a/docs/notebooks/llm-agent-react-langchain-with-output.rst b/docs/notebooks/llm-agent-react-langchain-with-output.rst index 9adb0311542426..7313d4c454c42a 100644 --- a/docs/notebooks/llm-agent-react-langchain-with-output.rst +++ b/docs/notebooks/llm-agent-react-langchain-with-output.rst @@ -70,12 +70,12 @@ Prerequisites import requests from pathlib import Path - + r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py", ) open("notebook_utils.py", "w").write(r.text) - + if not Path("cmd_helper.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/cmd_helper.py") open("cmd_helper.py", "w", encoding="utf-8").write(r.text) @@ -92,9 +92,9 @@ Prerequisites .. code:: ipython3 import os - + os.environ["GIT_CLONE_PROTECTION_ACTIVE"] = "false" - + %pip install -Uq pip %pip uninstall -q -y optimum optimum-intel %pip install --pre -Uq "openvino>=2024.5.0" openvino-tokenizers[transformers] --extra-index-url https://storage.openvinotoolkit.org/simple/wheels/nightly @@ -131,20 +131,20 @@ creating custom tools. .. code:: ipython3 from langchain_core.tools import tool - - + + @tool def multiply(first_int: int, second_int: int) -> int: """Multiply two integers together.""" return first_int * second_int - - + + @tool def add(first_int: int, second_int: int) -> int: "Add two integers." return first_int + second_int - - + + @tool def exponentiate(base: int, exponent: int) -> int: "Exponentiate the base to the exponent power." @@ -213,22 +213,22 @@ previous agent tool invocations and the corresponding tool outputs. .. code:: ipython3 PREFIX = """Respond to the human as helpfully and accurately as possible. 
You have access to the following tools:""" - + FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). - + Valid "action" values: "Final Answer" or {tool_names} - + Provide only ONE action per $JSON_BLOB, as shown: - + ``` {{{{ "action": $TOOL_NAME, "action_input": $INPUT }}}} ``` - + Follow this format: - + Question: input question to answer Thought: consider previous and subsequent steps Action: @@ -245,10 +245,10 @@ previous agent tool invocations and the corresponding tool outputs. "action_input": "Final response to human" }}}} ```""" - + SUFFIX = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. Thought:""" - + HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}" Create LLM @@ -294,7 +294,7 @@ select following models as LLM in agent pipeline. .. code:: python - ## login to huggingfacehub to get access to pretrained model + ## login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -318,16 +318,16 @@ folder. .. code:: ipython3 import ipywidgets as widgets - + llm_model_ids = ["Qwen/Qwen2.5-7B-Instruct", "Qwen/Qwen2.5-3B-Instruct", "Qwen/qwen2.5-14b-instruct", "meta-llama/Meta-Llama-3.1-8B-Instruct"] - + llm_model_id = widgets.Dropdown( options=llm_model_ids, value=llm_model_ids[0], description="Model:", disabled=False, ) - + llm_model_id @@ -342,10 +342,10 @@ folder. .. code:: ipython3 from cmd_helper import optimum_cli - + llm_model_path = llm_model_id.value.split("/")[-1] repo_name = llm_model_id.value.split("/")[0] - + if not Path(llm_model_path).exists(): optimum_cli( llm_model_id.value, llm_model_path, additional_args={"task": "text-generation-with-past", "weight-format": "int4", "group-size": "128", "ratio": "1.0"} @@ -359,9 +359,9 @@ Select inference device for LLM .. code:: ipython3 from notebook_utils import device_widget - + device = device_widget("CPU", exclude=["NPU"]) - + device @@ -383,37 +383,37 @@ information `__. from langchain_huggingface import HuggingFacePipeline from transformers.generation.stopping_criteria import StoppingCriteriaList, StoppingCriteria - + import openvino.properties as props import openvino.properties.hint as hints import openvino.properties.streams as streams - - + + class StopSequenceCriteria(StoppingCriteria): """ This class can be used to stop generation whenever a sequence of tokens is encountered. - + Args: stop_sequences (`str` or `List[str]`): The sequence (or list of sequences) on which to stop execution. tokenizer: The tokenizer used to decode the model outputs. """ - + def __init__(self, stop_sequences, tokenizer): if isinstance(stop_sequences, str): stop_sequences = [stop_sequences] self.stop_sequences = stop_sequences self.tokenizer = tokenizer - + def __call__(self, input_ids, scores, **kwargs) -> bool: decoded_output = self.tokenizer.decode(input_ids.tolist()[0]) return any(decoded_output.endswith(stop_sequence) for stop_sequence in self.stop_sequences) - - + + ov_config = {hints.performance_mode(): hints.PerformanceMode.LATENCY, streams.num(): "1", props.cache_dir(): ""} stop_tokens = ["Observation:"] - + ov_llm = HuggingFacePipeline.from_model_id( model_id=llm_model_path, task="text-generation", @@ -425,20 +425,20 @@ information `__. 
}, pipeline_kwargs={"max_new_tokens": 2048}, ) - + tokenizer = ov_llm.pipeline.tokenizer ov_llm.pipeline._forward_params["stopping_criteria"] = StoppingCriteriaList([StopSequenceCriteria(stop_tokens, tokenizer)]) .. code:: ipython3 from langchain_huggingface import ChatHuggingFace - + ov_chat = ChatHuggingFace(llm=ov_llm, verbose=True) ov_chat = ov_chat.bind(skip_prompt=True, stop=["Observation:"]) You can get additional inference speed improvement with `Dynamic Quantization of activations and KV-cache quantization on -CPU `__. +CPU `__. These options can be enabled with ``ov_config`` as follows: .. code:: ipython3 @@ -466,7 +466,7 @@ outputs back to the agent, and repeats. .. code:: ipython3 from langchain.agents import AgentExecutor, StructuredChatAgent - + agent = StructuredChatAgent.from_llm_and_tools( ov_chat, tools, @@ -494,11 +494,11 @@ prompt template. .. parsed-literal:: - - + + > Entering new AgentExecutor chain... Thought: First, we need to take 3 to the fifth power. Then we will find the sum of twelve and three. After that, we multiply the first result by the second result. Finally, we'll square the whole result. - + Action: ``` { @@ -512,7 +512,7 @@ prompt template. Observation: Observation: 243 Thought:Next, let's find the sum of twelve and three. - + Action: ``` { @@ -526,7 +526,7 @@ prompt template. Observation: Observation: 15 Thought:Now, we will multiply the result of \(3^5\) (which is 243) by the sum of 12 and 3 (which is 15). - + Action: ``` { @@ -539,8 +539,8 @@ prompt template. ``` Observation: Observation: 3645 - Thought:Thought: Now, we need to square the result of the multiplication (3645). - + Thought:Thought: Now, we need to square the result of the multiplication (3645). + Action: ``` { @@ -553,7 +553,7 @@ prompt template. ``` Observation: 13286025 Thought:Thought: I know what to respond - + Action: ``` { @@ -561,7 +561,7 @@ prompt template. "action_input": "The final result is 13286025." } ``` - + > Finished chain. @@ -598,10 +598,10 @@ words generated by agent. from langchain_community.utilities import WikipediaAPIWrapper from langchain_core.callbacks import CallbackManagerForToolRun from typing import Optional - + from pydantic import BaseModel, Field - - + + class WikipediaQueryRunWrapper(WikipediaQueryRun): def _run( self, @@ -610,17 +610,17 @@ words generated by agent. ) -> str: """Use the Wikipedia tool.""" return self.api_wrapper.run(text) - - + + api_wrapper = WikipediaAPIWrapper(top_k_results=2, doc_content_chars_max=1000) - - + + class WikiInputs(BaseModel): """inputs to the wikipedia tool.""" - + text: str = Field(description="query to look up on wikipedia.") - - + + wikipedia = WikipediaQueryRunWrapper( description="A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. 
Input should be a search query.", args_schema=WikiInputs, @@ -652,8 +652,8 @@ In this examples, we will create 2 customized tools for import urllib.parse import json5 - - + + @tool def painting(prompt: str) -> str: """ @@ -661,8 +661,8 @@ In this examples, we will create 2 customized tools for """ prompt = urllib.parse.quote(prompt) return json5.dumps({"image_url": f"https://image.pollinations.ai/prompt/{prompt}"}, ensure_ascii=False) - - + + painting.invoke({"prompt": "a cat"}) @@ -683,10 +683,10 @@ In this examples, we will create 2 customized tools for """ Get the current weather for `city_name` """ - + if not isinstance(city_name, str): raise TypeError("City name must be a string") - + key_selection = { "current_condition": [ "temp_C", @@ -697,15 +697,15 @@ In this examples, we will create 2 customized tools for ], } import requests - + resp = requests.get(f"https://wttr.in/{city_name}?format=j1") resp.raise_for_status() resp = resp.json() ret = {k: {_v: resp[k][0][_v] for _v in v} for k, v in key_selection.items()} - + return str(ret) - - + + weather.invoke({"city_name": "London"}) @@ -725,7 +725,7 @@ Create AI agent demo with Gradio UI .. code:: ipython3 tools = [wikipedia, painting, weather] - + agent = StructuredChatAgent.from_llm_and_tools( ov_chat, tools, @@ -741,28 +741,28 @@ Create AI agent demo with Gradio UI def partial_text_processor(partial_text, new_text): """ helper for updating partially generated answer, used by default - + Params: partial_text: text buffer for storing previosly generated text new_text: text update for the current step Returns: updated text string - + """ partial_text += new_text return partial_text - - + + def run_chatbot(history): """ callback function for running chatbot on submit button click - + Params: history: conversation history - + """ partial_text = "" - + for new_text in agent_executor.stream( {"input": history[-1][0]}, ): @@ -770,8 +770,8 @@ Create AI agent demo with Gradio UI partial_text = partial_text_processor(partial_text, new_text["output"]) history[-1][1] = partial_text yield history - - + + def request_cancel(): ov_chat.llm.pipeline.model.request.cancel() @@ -780,11 +780,11 @@ Create AI agent demo with Gradio UI if not Path("gradio_helper.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/llm-agent-react/gradio_helper.py") open("gradio_helper.py", "w").write(r.text) - + from gradio_helper import make_demo - + demo = make_demo(run_fn=run_chatbot, stop_fn=request_cancel) - + try: demo.launch() except Exception: diff --git a/docs/notebooks/llm-agent-react-with-output.rst b/docs/notebooks/llm-agent-react-with-output.rst index 791355276fd2fd..8741f5f5254013 100644 --- a/docs/notebooks/llm-agent-react-with-output.rst +++ b/docs/notebooks/llm-agent-react-with-output.rst @@ -62,22 +62,22 @@ Prerequisites import os import requests - - + + r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py", ) open("notebook_utils.py", "w").write(r.text) - + r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/pip_helper.py", ) open("pip_helper.py", "w").write(r.text) - + os.environ["GIT_CLONE_PROTECTION_ACTIVE"] = "false" - + from pip_helper import pip_install - + pip_install( "-q", "--extra-index-url", @@ -122,16 +122,16 @@ Vietnamese, Thai, Arabic, and more. For more details, please refer to .. 
code:: ipython3 import ipywidgets as widgets - + llm_model_ids = ["Qwen/Qwen2.5-3B-Instruct", "Qwen/Qwen2.5-7B-Instruct", "Qwen/qwen2.5-14b-instruct"] - + llm_model_id = widgets.Dropdown( options=llm_model_ids, value=llm_model_ids[0], description="Model:", disabled=False, ) - + llm_model_id @@ -146,9 +146,9 @@ Vietnamese, Thai, Arabic, and more. For more details, please refer to .. code:: ipython3 from pathlib import Path - + llm_model_path = llm_model_id.value.split("/")[-1] - + if not Path(llm_model_path).exists(): !optimum-cli export openvino --model {llm_model_id.value} --task text-generation-with-past --trust-remote-code --weight-format int4 --group-size 128 --ratio 1.0 --sym {llm_model_path} @@ -160,9 +160,9 @@ Select inference device for LLM .. code:: ipython3 from notebook_utils import device_widget - + llm_device = device_widget("CPU", exclude=["NPU"]) - + llm_device @@ -213,7 +213,7 @@ Tokenizer class and pipelines API are compatible with Optimum models. You can find more details about OpenVINO LLM inference using HuggingFace Optimum API in `LLM inference -guide `__. +guide `__. .. code:: ipython3 @@ -226,15 +226,15 @@ guide `__ import openvino.properties as props import openvino.properties.hint as hints import openvino.properties.streams as streams - + import json import json5 import torch - + tokenizer = AutoTokenizer.from_pretrained(llm_model_path, trust_remote_code=True) - + ov_config = {hints.performance_mode(): hints.PerformanceMode.LATENCY, streams.num(): "1", props.cache_dir(): ""} - + llm = OVModelForCausalLM.from_pretrained( llm_model_path, device=llm_device.value, @@ -242,7 +242,7 @@ guide `__ config=AutoConfig.from_pretrained(llm_model_path, trust_remote_code=True), trust_remote_code=True, ) - + llm.generation_config.top_k = 1 llm.generation_config.max_length = 2000 @@ -260,31 +260,31 @@ received from tool calling.. class StopSequenceCriteria(StoppingCriteria): """ This class can be used to stop generation whenever a sequence of tokens is encountered. - + Args: stop_sequences (`str` or `List[str]`): The sequence (or list of sequences) on which to stop execution. tokenizer: The tokenizer used to decode the model outputs. """ - + def __init__(self, stop_sequences, tokenizer): if isinstance(stop_sequences, str): stop_sequences = [stop_sequences] self.stop_sequences = stop_sequences self.tokenizer = tokenizer - + def __call__(self, input_ids, scores, **kwargs) -> bool: decoded_output = self.tokenizer.decode(input_ids.tolist()[0]) return any(decoded_output.endswith(stop_sequence) for stop_sequence in self.stop_sequences) - - + + def text_completion(prompt: str, stop_words) -> str: im_end = "<|im_end|>" if im_end not in stop_words: stop_words = stop_words + [im_end] streamer = TextStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True) - + stopping_criteria = StoppingCriteriaList([StopSequenceCriteria(stop_words, tokenizer)]) input_ids = torch.tensor([tokenizer.encode(prompt)]) generate_kwargs = dict( @@ -297,7 +297,7 @@ received from tool calling.. output = tokenizer.decode(output, errors="ignore") assert output.startswith(prompt) output = output[len(prompt) :].replace("<|endoftext|>", "").replace(im_end, "") - + for stop_str in stop_words: idx = output.find(stop_str) if idx != -1: @@ -339,13 +339,13 @@ parameter should be a sequence of messages that contains the .. code:: ipython3 TOOL_DESC = """{name_for_model}: Call this tool to interact with the {name_for_human} API. What is the {name_for_human} API useful for? 
{description_for_model} Parameters: {parameters}""" - + PROMPT_REACT = """Answer the following questions as best you can. You have access to the following APIs: - + {tools_text} - + Use the following format: - + Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [{tools_name_text}] @@ -354,9 +354,9 @@ parameter should be a sequence of messages that contains the ... (this Thought/Action/Action Input/Observation can be repeated zero or more times) Thought: I now know the final answer Final Answer: the final answer to the original input question - + Begin! - + Question: {query}""" Meanwhile we have to create function for consolidate the tools @@ -381,9 +381,9 @@ information and conversation history into the prompt template. raise NotImplementedError tools_text.append(tool) tools_text = "\n\n".join(tools_text) - + tools_name_text = ", ".join([tool_info["name_for_model"] for tool_info in list_of_tool_info]) - + messages = [{"role": "system", "content": "You are a helpful assistant."}] for i, (query, response) in enumerate(chat_history): if list_of_tool_info: @@ -397,9 +397,9 @@ information and conversation history into the prompt template. messages.append({"role": "user", "content": query}) if response: messages.append({"role": "assistant", "content": response}) - + prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False, return_tensors="pt") - + return prompt Create parser @@ -493,7 +493,7 @@ execute them according to the output of LLM. return str(ret) elif tool_name == "image_gen": import urllib.parse - + tool_args = tool_args.replace("(", "").replace(")", "") prompt = json5.loads(tool_args)["prompt"] prompt = urllib.parse.quote(prompt) @@ -503,11 +503,11 @@ execute them according to the output of LLM. ) else: raise NotImplementedError - - + + def llm_with_tool(prompt: str, history, list_of_tool_info=()): chat_history = [(x["user"], x["bot"]) for x in history] + [(prompt, "")] - + planning_prompt = build_input_text(chat_history, list_of_tool_info) text = "" while True: @@ -522,7 +522,7 @@ execute them according to the output of LLM. else: text += output break - + new_history = [] new_history.extend(history) new_history.append({"user": prompt, "bot": text}) @@ -537,7 +537,7 @@ Run agent history = [] query = "get the weather in London, and create a picture of Big Ben based on the weather information" - + response, history = llm_with_tool(prompt=query, history=history, list_of_tool_info=tools) diff --git a/docs/notebooks/llm-chatbot-generate-api-with-output.rst b/docs/notebooks/llm-chatbot-generate-api-with-output.rst index c09b463ae985d0..2c23e3ef3b4f64 100644 --- a/docs/notebooks/llm-chatbot-generate-api-with-output.rst +++ b/docs/notebooks/llm-chatbot-generate-api-with-output.rst @@ -81,9 +81,9 @@ Install required dependencies .. 
code:: ipython3 import os - + os.environ["GIT_CLONE_PROTECTION_ACTIVE"] = "false" - + %pip install -Uq pip %pip uninstall -q -y optimum optimum-intel %pip install -q -U "openvino>=2024.3.0" openvino-tokenizers[transformers] openvino-genai @@ -103,12 +103,12 @@ Install required dependencies from pathlib import Path import requests import shutil - + # fetch model configuration - + config_shared_path = Path("../../utils/llm_config.py") config_dst_path = Path("llm_config.py") - + if not config_dst_path.exists(): if config_shared_path.exists(): try: @@ -127,7 +127,7 @@ Install required dependencies r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/llm_config.py") with open("llm_config.py", "w", encoding="utf-8") as f: f.write(r.text) - + if not Path("notebook_utils.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py") open("notebook_utils.py", "w").write(r.text) @@ -238,7 +238,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -270,7 +270,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -304,7 +304,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -338,7 +338,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -399,7 +399,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -432,7 +432,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -466,7 +466,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -500,7 +500,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -531,7 +531,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -644,9 +644,9 @@ Click here to see available models options .. 
code:: ipython3 from llm_config import get_llm_selection_widget - + form, lang, model_id_widget, compression_variant, use_preconverted = get_llm_selection_widget() - + form @@ -668,7 +668,7 @@ Click here to see available models options .. parsed-literal:: Selected model qwen2-0.5b-instruct with INT4 compression - + Convert model using Optimum-CLI tool ------------------------------------ @@ -676,7 +676,7 @@ Convert model using Optimum-CLI tool `Optimum Intel `__ -is the interface between the +is the interface between the `Transformers `__ and `Diffusers `__ libraries and OpenVINO to accelerate end-to-end pipelines on Intel architectures. @@ -776,28 +776,28 @@ be additionally applied during model export with INT4 precision using .. code:: ipython3 from llm_config import convert_and_compress_model - + model_dir = convert_and_compress_model(model_id, model_configuration, compression_variant.value, use_preconverted.value) .. parsed-literal:: ✅ INT4 qwen2-0.5b-instruct model already converted and can be found in qwen2/INT4_compressed_weights - + Let’s compare model size for different compression types .. code:: ipython3 from llm_config import compare_model_size - + compare_model_size(model_dir) .. parsed-literal:: Size of model with INT4 compressed weights is 358.86 MB - + Select device for inference --------------------------- @@ -807,9 +807,9 @@ Select device for inference .. code:: ipython3 from notebook_utils import device_widget - + device = device_widget(default="CPU", exclude=["NPU"]) - + device @@ -852,14 +852,14 @@ of the available generation parameters more deeply later. .. code:: ipython3 import openvino_genai as ov_genai - + print(f"Loading model from {model_dir}\n") - - + + pipe = ov_genai.LLMPipeline(str(model_dir), device.value) - + generation_config = pipe.get_generation_config() - + input_prompt = "The Sun is yellow bacause" print(f"Input text: {input_prompt}") print(pipe.generate(input_prompt, max_new_tokens=10)) @@ -868,10 +868,10 @@ of the available generation parameters more deeply later. .. parsed-literal:: Loading model from qwen2/INT4_compressed_weights - + Input text: The Sun is yellow bacause it is made of hydrogen and oxygen atoms. The - + Run Chatbot ----------- @@ -932,7 +932,7 @@ history, we should move LLMPipeline to chat mode using ``start_chat()`` method. More info about OpenVINO LLM inference can be found in `LLM Inference -Guide `__ +Guide `__ .. raw:: html @@ -1022,11 +1022,11 @@ Click here to see detailed description of advanced options if not Path("gradio_helper_genai.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/llm-chatbot/gradio_helper_genai.py") open("gradio_helper_genai.py", "w").write(r.text) - + from gradio_helper_genai import make_demo - + demo = make_demo(pipe, model_configuration, model_id, lang.value) - + try: demo.launch(debug=True) except Exception: diff --git a/docs/notebooks/llm-chatbot-with-output.rst b/docs/notebooks/llm-chatbot-with-output.rst index 0d214f5cccc0fc..24ebf376309f08 100644 --- a/docs/notebooks/llm-chatbot-with-output.rst +++ b/docs/notebooks/llm-chatbot-with-output.rst @@ -1012,7 +1012,7 @@ Tokenizer class and pipelines API are compatible with Optimum models. You can find more details about OpenVINO LLM inference using HuggingFace Optimum API in `LLM inference -guide `__. +guide `__. .. code:: ipython3 @@ -1109,7 +1109,7 @@ decoding methods in this generation updates conversation history for next conversation step. 
it makes stronger connection of next question with previously provided and allows user to make clarifications regarding previously provided -answers.https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html +answers.https://docs.openvino.ai/2024/openvino-workflow-generative.html | There are several parameters that can control text generation quality: \* ``Temperature`` is a parameter used to control the level of @@ -1160,7 +1160,7 @@ answers.https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html heavily than a token that has appeared only one time. A value of 1 means that there is no penalty and values larger than 1 discourage repeated - tokens.https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html + tokens.https://docs.openvino.ai/2024/openvino-workflow-generative.html .. code:: ipython3 diff --git a/docs/notebooks/multilora-image-generation-with-output.rst b/docs/notebooks/multilora-image-generation-with-output.rst index 2efe1aaab50908..f6445e5a2ec1f2 100644 --- a/docs/notebooks/multilora-image-generation-with-output.rst +++ b/docs/notebooks/multilora-image-generation-with-output.rst @@ -75,11 +75,11 @@ Guide 0.25.0" pillow "gradio>=4.19" "peft>=0.7.0" %pip install -q "git+https://github.com/huggingface/optimum-intel.git" %pip install -q -U "openvino>=2024.5.0" "openvino-tokenizers>=2024.5.0" "openvino-genai>=2024.5.0" - + if platform.system() == "Darwin": %pip install -q "numpy<2.0.0" @@ -87,16 +87,16 @@ Guide `__ is -the interface between the +the interface between the `Transformers `__ and `Diffusers `__ libraries and OpenVINO to accelerate end-to-end pipelines on Intel architectures. @@ -144,7 +144,7 @@ saved on disk before export. For avoiding this, we will use ``export_from_model`` function that accepts initialized model. Additionally, for using model with OpenVINO GenAI, we need to export tokenizers to OpenVINO format using `OpenVINO -Tokenizers `__ +Tokenizers `__ library. In this tutorial we will use `Stable Diffusion @@ -160,9 +160,9 @@ Diffusion family. from optimum.intel.openvino import OVConfig from optimum.exporters.openvino.convert import export_tokenizer import gc - + model_dir = Path("sdxl-lcm") - + if not model_dir.exists(): model_id = "stabilityai/stable-diffusion-xl-base-1.0" adapter_id = "latent-consistency/lcm-lora-sdxl" @@ -262,17 +262,17 @@ Prepare LoRA Adapters .. code:: ipython3 from lora_config import LORA - + # uncomment this line to see predefined LoRA adapters configuration used in this notebook # LORA .. code:: ipython3 from huggingface_hub import hf_hub_download - + lora_dir = Path("lora") adapter_paths = [] - + for lora in LORA: lora_model_dir = lora_dir / lora["name"].lower().replace(" ", "_") file_name = lora["file_name"] @@ -283,8 +283,8 @@ Prepare LoRA Adapters .. code:: ipython3 import openvino_genai as ov_genai - - + + def prepare_adapter_config(scales=None): if scales is None: scales = [1 / len(adapter_paths)] * len(adapter_paths) @@ -293,10 +293,10 @@ Prepare LoRA Adapters adapter_config = ov_genai.AdapterConfig() for adapter, scale in zip(adapter_paths, scales): adapter_config.add(ov_genai.Adapter(adapter), scale) - + return adapter_config - - + + adapters_config = prepare_adapter_config(0.0) adapters = adapters_config.get_adapters() @@ -312,7 +312,7 @@ denoising. For reproducibility of generation results, we will use .. code:: ipython3 from notebook_utils import device_widget - + device = device_widget(default="CPU", exclude=["NPU"]) device @@ -329,21 +329,21 @@ denoising. 
For reproducibility of generation results, we will use import openvino as ov import torch - - + + class Generator(ov_genai.Generator): def __init__(self, seed): ov_genai.Generator.__init__(self) self.generator = torch.Generator(device="cpu").manual_seed(seed) - + def next(self): return torch.randn(1, generator=self.generator, dtype=torch.float32).item() - + def randn_tensor(self, shape: ov.Shape): torch_tensor = torch.randn(list(shape), generator=self.generator, dtype=torch.float32) return ov.Tensor(torch_tensor.numpy()) - - + + pipe = ov_genai.Text2ImagePipeline(model_dir, "CPU", adapters=adapters_config) Selecting a specific adapter during generation @@ -370,7 +370,7 @@ let’s select a LoRA for generating images in X-Ray style. .. code:: ipython3 from PIL import Image - + image = Image.fromarray(image_tensor.data[0]) image @@ -396,7 +396,7 @@ modern illustration pointillistic style. prompt_template2 = LORA[2].get("prompt", "") adapter1_weight = LORA[1].get("weight", 1.0) adapter2_weight = LORA[2].get("weight", 1.0) - + prompt = prompt_template2.replace("<subject>", prompt_template1.replace("<subject>", subject)) adapter_config = ov_genai.AdapterConfig() adapter_config.add(adapters[1], adapter1_weight) @@ -446,7 +446,7 @@ Interactive demo .. code:: ipython3 gradio_helper_path = Path("gradio_helper.py") - + if not gradio_helper_path.exists(): r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/multilora-image-generation/gradio_helper.py", ) @@ -456,9 +456,9 @@ Interactive demo .. code:: ipython3 from gradio_helper import make_demo - + demo = make_demo(pipe, Generator, adapters, LORA) - + try: demo.launch(debug=False) except Exception: diff --git a/docs/notebooks/speculative-sampling-with-output.rst index 868fbe9beccf9e..8ca9ca5bc7002c 100644 --- a/docs/notebooks/speculative-sampling-with-output.rst +++ b/docs/notebooks/speculative-sampling-with-output.rst @@ -136,7 +136,7 @@ If you want to run your own models, you should convert them using the Optimum `__ library accelerated by OpenVINO integration. More details about model preparation can be found in `OpenVINO LLM inference -guide `__ +guide `__ .. code:: ipython3 diff --git a/docs/notebooks/text-to-image-genai-with-output.rst index 126c654405b36a..a0f0af9ef41538 100644 --- a/docs/notebooks/text-to-image-genai-with-output.rst +++ b/docs/notebooks/text-to-image-genai-with-output.rst @@ -23,7 +23,7 @@ the Hugging Face Transformers library to the OpenVINO™ IR format. For more details, refer to the `Hugging Face Optimum Intel documentation `__. 2. Run inference using the `Text-to-Image Generation -pipeline `__ +pipeline `__ from OpenVINO GenAI. A compact sketch of both steps follows.
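Taken together, the two steps above reduce to a few lines of code. The sketch below is illustrative (the prompt is made up; the model id and output directory are the ones used later in this tutorial):

.. code:: ipython3

    # Step 1 (run once, in a shell): export the Diffusers model to OpenVINO IR.
    # optimum-cli export openvino --model dreamlike-art/dreamlike-anime-1.0 dreamlike_anime_1_0_ov

    # Step 2: run the exported model with the OpenVINO GenAI text-to-image pipeline.
    import openvino_genai as ov_genai
    from PIL import Image

    pipe = ov_genai.Text2ImagePipeline("dreamlike_anime_1_0_ov", "CPU")
    image_tensor = pipe.generate("anime sketch of a mountain village in the snow", width=512, height=512, num_inference_steps=20)
    image = Image.fromarray(image_tensor.data[0])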
@@ -59,19 +59,19 @@ Prerequisites import platform import requests - - + + %pip install -q "git+https://github.com/huggingface/optimum-intel.git" %pip install -q -U "openvino>=2024.5" "openvino-tokenizers>=2024.5" "openvino-genai>=2024.5" %pip install -q Pillow "diffusers>=0.30.3" "gradio>=4.19" "typing_extensions>=4.9" if platform.system() == "Darwin": %pip install -q "numpy<2.0.0" - + r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py", ) open("notebook_utils.py", "w").write(r.text) - + r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/cmd_helper.py", ) @@ -83,7 +83,7 @@ Convert model using Optimum-CLI tool `Optimum Intel `__ -is the interface between the +is the interface between the `Transformers `__ and `Diffusers `__ libraries and OpenVINO to accelerate end-to-end pipelines on Intel architectures. @@ -118,12 +118,12 @@ wrapper over cli-command. .. code:: ipython3 from pathlib import Path - + from cmd_helper import optimum_cli - - + + model_dir = Path("dreamlike_anime_1_0_ov") - + if not model_dir.exists(): optimum_cli("dreamlike-art/dreamlike-anime-1.0", model_dir) @@ -137,8 +137,8 @@ select device from dropdown list for running inference using OpenVINO .. code:: ipython3 from notebook_utils import device_widget - - + + device = device_widget("CPU", exclude=["NPU"]) device @@ -163,27 +163,27 @@ That’s it:) import openvino as ov from PIL import Image import torch - - + + class Generator(ov_genai.Generator): def __init__(self, seed): ov_genai.Generator.__init__(self) self.generator = torch.Generator(device="cpu").manual_seed(seed) - + def next(self): return torch.randn(1, generator=self.generator, dtype=torch.float32).item() - + def randn_tensor(self, shape: ov.Shape): torch_tensor = torch.randn(list(shape), generator=self.generator, dtype=torch.float32) return ov.Tensor(torch_tensor.numpy()) - - + + random_generator = Generator(42) # openvino_genai.CppStdGenerator can be used to have same images as C++ sample pipe = ov_genai.Text2ImagePipeline(model_dir, device.value) prompt = "anime, masterpiece, high quality, a green snowman with a happy smiling face in the snows" - + image_tensor = pipe.generate(prompt, width=512, height=512, num_inference_steps=20, num_images_per_prompt=1, generator=random_generator) - + image = Image.fromarray(image_tensor.data[0]) .. code:: ipython3 @@ -230,20 +230,20 @@ from command line: def prepare_adapter_config(adapters): adapter_config = ov_genai.AdapterConfig() - + # Multiple LoRA adapters applied simultaneously are supported, parse them all and corresponding alphas from cmd parameters: for i in range(int(len(adapters) / 2)): adapter = ov_genai.Adapter(adapters[2 * i]) alpha = float(adapters[2 * i + 1]) adapter_config.add(adapter, alpha) - + return adapter_config - - + + adapter_config = prepare_adapter_config(["soulcard.safetensors", 0.5]) - + pipe = ov_genai.Text2ImagePipeline(model_dir, device.value, adapters=adapter_config) - + image_tensor = pipe.generate(prompt, generator=Generator(42), width=512, height=512, num_inference_steps=20) image = Image.fromarray(image_tensor.data[0]) @@ -270,10 +270,10 @@ Interactive demo .. 
code:: ipython3 from gradio_helper import make_demo - - + + demo = make_demo(pipe, Generator, adapter_config) - + try: demo.launch(debug=True) except Exception: diff --git a/docs/sphinx_setup/_static/selector-tool/assets/selector-DiE3WrtX.js b/docs/sphinx_setup/_static/selector-tool/assets/selector-DiE3WrtX.js index 264f23f1dd17e3..568dd5dad034f2 100644 --- a/docs/sphinx_setup/_static/selector-tool/assets/selector-DiE3WrtX.js +++ b/docs/sphinx_setup/_static/selector-tool/assets/selector-DiE3WrtX.js @@ -56,4 +56,4 @@ enabled=1 gpgcheck=1 repo_gpgcheck=1 gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB -EOF`,getMoveRepoFileCommand:e=>`sudo mv /tmp/openvino-${e.metadata.yumYear}.repo ${Zu}`,verifyRepoCommand:"yum repolist | grep -i openvino",getInstallCommand:e=>`sudo yum install openvino-${e.metadata.yumVersion}`};class Sv extends Ae{constructor(t){super({level:T.DISTRIBUTION,key:A.ZYPPER,metadata:{title:"ZYPPER",subtitle:de("distributions.CAPIOnly")}}),this._data=t}get data(){return{...this._data,commands:xv}}}const xv={addRepo:"sudo zypper addrepo https://download.opensuse.org/repositories/science/openSUSE_Tumbleweed/science.repo",refresh:"sudo zypper refresh",getInstallCommand:({metadata:e})=>`sudo zypper install openvino-devel-${e.zypperVersion} openvino-sample-${e.zypperVersion}`};class aa extends Xr{constructor(t,n,r){super({level:T.OP_SYSTEM,key:t,metadata:n},r),this._setDefaultOS()}_setDefaultOS(){const t=this._detectOS()||Qe.WINDOWS;this.key===t&&this.default()}_detectOS(){const{userAgent:t}=navigator,n={windows:/(Windows|Win)/g,macOS:/(Macintosh|Mac)/g,linux:/(Linux|X11)/g};return n.windows.test(t)?Qe.WINDOWS:n.macOS.test(t)?Qe.MACOS:n.linux.test(t)?Qe.LINUX:null}}class Zr extends aa{constructor(t){super(Qe.WINDOWS,Mm,t)}}class qr extends aa{constructor(t){super(Qe.MACOS,Bm,t)}}class ei extends aa{constructor(t){super(Qe.LINUX,Km,t)}}const Ov=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-archive-linux.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/latest"}),new _e,new Ne]),Pv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-archive-macos.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/latest"}),new _e,new Ne]),Nv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-archive-windows.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/latest"}),new _e,new Ne]),_v=new Cd([Nv,Pv,Ov]),Ev=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html"}},{pythonAPI:!0}).default(),new 
Pe({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_from_archive_linux.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.3/linux"}).includesNPUPlugin(),new Id({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_apt.html"},os:[re.UBUNTU_18,re.UBUNTU_20,re.UBUNTU_22]}),new Td({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_yum.html"}}),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"},downloadLink:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conda.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conan.html"}})]),Cv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_from_archive_macos.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.3/macos"}),new _e,new Ne,new rr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conda.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conan.html"}})]),jv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_from_archive_windows.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.3/windows"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"},downloadLink:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conda.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conan.html"}})]),Iv=new Xm([jv,Cv,Ev]),Lv=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-archive-linux.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/linux"}).includesNPUPlugin(),new Id({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-apt.html"},os:[re.UBUNTU_20,re.UBUNTU_22,re.UBUNTU_24]}),new Td({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-yum.html"}}),new _e,new Ne,new 
ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conda.html"}}),new Ld({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-brew.html"}}),new la({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-vcpkg.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conan.html"}}),new sa({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-npm.html"}}),new Sv({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-zypper.html"}}),new yv({linksSet:{installation:"https://docs.openvino.ai/2024/openvino-workflow/deployment-locally/integrate-openvino-with-ubuntu-snap.html"},downloadLink:"https://docs.openvino.ai/2024/openvino-workflow/deployment-locally/integrate-openvino-with-ubuntu-snap.html"})]),Rv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-archive-macos.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/macos"}),new _e,new Ne,new rr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conda.html"}}),new Ld({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-brew.html"}}),new la({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-vcpkg.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conan.html"}}),new sa({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-npm.html"}})]),Tv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-archive-windows.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/windows"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conda.html"}}),new la({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-vcpkg.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conan.html"}}),new sa({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-npm.html"}})]),Av=new jd([Tv,Rv,Lv]),Dv=new Gm([Av.default(),_v,Iv]),Uv=new ei([new 
Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#pypi-installation"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#archive-installation"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/nightly/latest"}),new _e,new Ne]),Fv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#pypi-installation"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#archive-installation"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/nightly/latest"}),new _e,new Ne]),zv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#pypi-installation"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#archive-installation"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/nightly/latest"}),new _e,new Ne]),Vv=new Cd([zv,Fv,Uv]),bv=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/linux"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"})]),$v=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/macos"}),new _e,new Ne]),Mv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/windows"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"})]),Bv=new jd([Mv,$v,bv]),Kv=new Wm([Bv.default(),Vv]),Hv=new Xr({level:T.ROOT,key:_d.ROOT,metadata:{title:"ROOT"}},[Dv.default(),Kv]).default();function Gv(e,t){var i,o;if(t.key===A.DOCKER||!t.footnoteLevel)return e;const n=(i=e[t.footnoteLevel])==null?void 0:i.selected,r=(o=e[t.footnoteLevel])==null?void 
0:o.nodes;return!n||!r||((Array.isArray(n)?[...n,...r]:[n]).forEach(s=>s.hasFootnote=!0),r.forEach(s=>s.checked&&(s.hasFootnote=!0))),e}class Wv{constructor(){ze(this,"_root",Hv)}getState(){try{return this._getState()}catch(t){return console.error(t),this._selectDefaults(this._root),this._getState()}}_getState(){const t=this._root.children,n=this._get_selected(t),r=n.children,i=this._get_selected(r),{systems:o,system:s}=this._processVersion(i),l=s.children,a=this._get_selected(l),c={[T.PACKAGE]:{nodes:t.map(p=>p.toOption()),selected:n.toOption()},[T.VERSION]:{nodes:r.map(p=>p.toOption()),selected:i.toOption()},[T.OP_SYSTEM]:{nodes:o.map(p=>p.toOption()),selected:s.toOption()},[T.DISTRIBUTION]:{nodes:l.map(p=>p.toOption()),selected:a.toOption()}};return Gv(c,a)}_get_selected(t){t.some(({checked:r})=>r)||this._selectDefaultsForLevel(t[0].level);const n=t.find(({checked:r})=>r);if(!n)throw new Error("Not valid tree");return n}_processVersion(t){const n=t.children,r=this._get_selected(n);return{systems:n,system:r}}setState(t){this._setState(t)}_setState(t,n=this._root){if(!n.children.length)return;const r=n.children[0].level,i=Yv(t[r]);n.children.forEach(o=>o.checked=i.includes(o.key)),n.children.forEach(o=>this._setState(t,o))}select(t,n){return this._select(t,n),this.getState()}_select(t,n,r=this._root){var i;if(((i=r.children[0])==null?void 0:i.level)!==t){r.children.forEach(o=>this._select(t,n,o));return}if(r.childrenSelector){r.childrenSelector(r.children,n);return}r.children.forEach(o=>o.checked=o.key===n)}_selectDefaultsForLevel(t,n=this._root){if(n.children.length){if(n.children[0].level!==t){n.children.forEach(r=>this._selectDefaultsForLevel(t,r));return}this._selectDefaults(n)}}_selectDefaults(t){t.children.forEach(n=>{n.checked=n.isDefault,this._selectDefaults(n)})}}const _n=new Wv;function Yv(e){const t=[];return Array.isArray(e)?t.push(...e):e&&t.push(e),t}function Ad(e,{serializeVersion:t}={serializeVersion:!0}){const n=[[T.PACKAGE,e.PACKAGE.selected.key],[T.VERSION,t?e.VERSION.selected.key:null],[T.OP_SYSTEM,e.OP_SYSTEM.selected.key],[T.DISTRIBUTION,e.DISTRIBUTION.selected.key]],r=new URLSearchParams;for(const[i,o]of n)o&&r.set(i,o);return r}function Dd(e){function t(n,r){const i=e.get(n);if(!i)throw new Error(`Cannot extract value for: ${n}`);if(!r[i])throw new Error(`Bad node key for: ${n}`);return r[i]}try{return{[T.PACKAGE]:t(T.PACKAGE,Se),[T.VERSION]:e.has(T.VERSION)?t(T.VERSION,wn):null,[T.OP_SYSTEM]:t(T.OP_SYSTEM,Qe),[T.DISTRIBUTION]:t(T.DISTRIBUTION,A)}}catch(n){return console.log(`Cannot restore state from url due to error "${n}"`),null}}function Qv(){const e=window.parent;if(!e.location.search)return null;const t=new URLSearchParams(e.location.search);return Dd(t)}function Jv(e,t,n,{serializeVersion:r}={serializeVersion:!0}){F.useEffect(()=>{const i=window.parent,o=Ad(t,{serializeVersion:r}).toString(),s=new URL(i.location.toString());if(!s.search){s.search=o,i.history.replaceState(null,"",s);return}s.search.slice(1)!==o&&(s.search=o,i.history.pushState(null,"",s))}),parent.onpopstate=()=>{const i=window.parent,o=new URLSearchParams(i.location.search),s=Dd(o);s&&(e.setState(s),n(e.getState()))}}const os=function(e){let t,n=!1;return function(...r){return n||(t=e(r),n=!0),t}};function Xv(e){var t,n;return typeof((n=(t=e.wap_tms)==null?void 0:t.custom)==null?void 0:n.trackComponentClick)!="function"?null:e.wap_tms.custom.trackComponentClick.bind(e.wap_tms.custom)}class 
Zv{constructor(){ze(this,"_window");ze(this,"_consoleNotification",{notInitialized:os(()=>console.log("Adobe analytics is not initialized")),notFound:os(()=>console.log("Adobe analytics not found on a page")),devMode:os(()=>console.log("Analytics in dev mode"))});ze(this,"_send",t=>{if(!this._window){this._consoleNotification.notInitialized();return}const n=Ad(_n.getState()).toString(),r=Xv(this._window);if(!r){this._consoleNotification.notFound();return}try{r(t,n)}catch(i){console.error(i)}})}initialize(t){this._window=t}install(){this._send("install")}combinationView(){this._send("combination-view")}}const He=new Zv;function qv(){const e=Qv();e&&_n.setState(e);const t=F.createContext((r,i)=>{_n.select(r,i)});function n(){const[r,i]=F.useState(_n.getState());return Jv(_n,r,i),[r,(o,s)=>i(_n.select(o,s))]}return{SelectorContext:t,useSelector:n}}async function ey(e){e&&(navigator.clipboard?await navigator.clipboard.writeText(e):ty(e))}function ty(e){const t=ny(e);document.body.append(t),t.select(),document.execCommand("copy"),t.remove()}function ny(e){const t=document.createElement("textarea");t.style.fontSize="12pt",t.style.border="0",t.style.padding="0",t.style.margin="0",t.style.position="absolute",t.style.left="-9999px";const n=window.pageYOffset||document.documentElement.scrollTop;return t.style.top=`${n}px`,t.setAttribute("readonly",""),t.value=e,t}function ry(){return m.jsxs("svg",{version:"1.1",width:"24",height:"24",viewBox:"0 0 205 205",xmlns:"http://www.w3.org/2000/svg",className:"svg-icon",children:[m.jsx("path",{fill:"none",stroke:"currentColor",strokeWidth:"10",d:"M 50 145 a 15 15 0 0 1 -15 -15 v -90 a 15 15 0 0 1 15 -15 h 70 a 15 15 0 0 1 15 15 v 5"}),m.jsx("rect",{x:"65",y:"60",width:"100",height:"120",rx:"15",fill:"none",stroke:"currentColor",strokeWidth:"10"})]})}function iy(){return m.jsx("svg",{version:"1.1",width:"24",height:"24",viewBox:"0 0 200 200",xmlns:"http://www.w3.org/2000/svg",className:"svg-icon",children:m.jsx("path",{strokeLinejoin:"round",strokeLinecap:"round",fill:"none",stroke:"currentColor",strokeWidth:"15",d:"M 40 100 L 90 150 L 170 40"})})}const b=({comment:e,command:t,onCopy:n})=>{const[r,i]=F.useState(!1),o=async()=>{r||(await ey(t),i(!0),setTimeout(()=>i(!1),1500),n==null||n())};return m.jsxs("div",{className:"st-code-snippet","data-cy":"instructions-step",children:[e&&m.jsx(Ud,{children:e}),m.jsxs("div",{"data-cy":"command",children:[t&&m.jsx("code",{className:"st-code-snippet-content",children:t}),t&&m.jsx("button",{className:"copy-button",type:"button","aria-label":"Copy","data-cy":"copy",onClick:o,children:r?m.jsx(iy,{}):m.jsx(ry,{})})]})]})},Ud=({children:e})=>m.jsxs("pre",{className:"st-code-snippet-comment",children:["# ",e]}),oy=({comment:e,snippets:t})=>m.jsxs("div",{className:"st-code-snippet-multi-line","data-cy":"command",children:[e&&m.jsx(Ud,{children:e}),t.map(n=>m.jsx(b,{...n},n.command))]});function sy(e){return e.host==="docs.openvino.ai"}const ss="production.docs.en",Fd=(ss==null?void 0:ss.includes("idz"))||!1,ls={link:"spark-hyperlink spark-hyperlink-primary spark-hyperlink-standard spark-focus-visible spark-focus-visible-self spark-focus-visible-snap spark-focus-visible-background",button:"spark-button spark-button-action spark-button-size-m spark-focus-visible spark-focus-visible-self spark-focus-visible-snap",buttonContent:"spark-button-content"},we=({href:e,children:t,type:n="link",testId:r="link",onClick:i})=>{const o=!Fd&&sy(new URL(e))?"_parent":"_blank";return 
n==="link"?m.jsx("a",{href:e,className:ls.link,target:o,rel:"noreferrer noopener","data-cy":r,onClick:()=>i==null?void 0:i(),children:t}):m.jsx("span",{className:ls.button,children:m.jsx("span",{className:ls.buttonContent,children:m.jsx("a",{href:e,target:o,rel:"noreferrer noopener","data-cy":r,onClick:()=>i==null?void 0:i(),children:t})})})},ly={heading:"spark-heading spark-font-200"},De=({title:e,accent:t=!1,dashed:n=!1,children:r,testId:i})=>m.jsxs("div",{className:`st-section ${t?"st-section-accent":""} ${n?"st-section-dashed":""}`,"data-cy":i,children:[m.jsx("span",{className:`st-section-title ${ly.heading}`,children:e}),m.jsx("div",{className:"st-section-content",children:F.Children.map(r,o=>m.jsx(ay,{children:o}))})]}),ay=({children:e})=>m.jsx("div",{className:"st-section-content-row",children:e}),uy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.apt.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,o={comment:m.jsxs(z,{ns:"translation",i18nKey:"distributions.apt.steps.addRepository",children:[m.jsx("b",{children:"Step 3:"})," Add the repository via the following command"]}),snippets:i.getAddRepositoryCommands(e,t.data.os).map(({ubuntuVersionNumber:l,command:a})=>({comment:`Ubuntu ${l}`,command:a}))},s={downloadKey:{comment:m.jsxs(z,{t:n,i18nKey:"download",values:{filename:i.keyFilename},children:[m.jsx("b",{children:"Step 1:"})," Download the ",m.jsx(we,{href:i.keyHref,children:i.keyFilename}),". You can also use the following command"]}),command:i.downloadKeyCommand},addKey:{comment:m.jsxs(z,{t:n,i18nKey:"addKey",children:[m.jsx("b",{children:"Step 2:"})," Add this key to the system keyring"]}),command:i.addKeyCommand},addRepository:o,updatePackages:{comment:m.jsxs(z,{t:n,i18nKey:"updateList",children:[m.jsx("b",{children:"Step 4:"})," Update the list of packages via the update command"]}),command:i.updatePackagesCommand},verifyAptCache:{comment:m.jsxs(z,{t:n,i18nKey:"verify",children:[m.jsx("b",{children:"Step 5:"})," Verify that the APT repository is properly set up. 
Run the apt-cache command to see a list of all available OpenVINO packages and components"]}),command:i.verifyAptCacheCommand},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 6:"})," Install OpenVINO Runtime"]}),command:i.getInstallCommand(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...s.downloadKey}),m.jsx(b,{...s.addKey}),m.jsx(oy,{...s.addRepository}),m.jsx(b,{...s.updatePackages}),m.jsx(b,{...s.verifyAptCache}),m.jsx(b,{...s.install})]})},cy=({distribution:e})=>{const{t}=M("translation",{keyPrefix:"distributions.brew.steps"}),{t:n}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:r}=e.data,i={install:{comment:m.jsx(z,{t,i18nKey:"install",children:"Download and install the package"}),command:r.install,onCopy:()=>He.install()}};return m.jsx(De,{title:n("install"),accent:!0,testId:"instructions",children:m.jsx(b,{...i.install})})},fy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.conan.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,{txtFilename:o,cmakeFilename:s}=i,l={createConanFile:{comment:m.jsxs(z,{t:n,i18nKey:"createConanFile",values:{txtFilename:o},children:[m.jsx("b",{children:"Step 1:"})," Create a ",m.jsx("b",{children:o})," file for your OpenVINO project and add “openvino” dependency in there"]}),command:i.conanTXTContent(e)},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",values:{cmakeFilename:s},children:[m.jsx("b",{children:"Step 2:"})," Run the command below to create ",m.jsx("b",{children:s})," file, which will be used to compile your project with OpenVINO"]}),command:i.install,onCopy:()=>He.install()},compile:{comment:m.jsxs(z,{t:n,i18nKey:"compile",children:[m.jsx("b",{children:"Step 3:"})," Configure and compile your project with OpenVINO"]}),command:i.compile}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...l.createConanFile}),m.jsx(b,{...l.install}),m.jsx(b,{...l.compile})]})},dy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.conda.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,o={createEnv:{comment:m.jsxs(z,{t:n,i18nKey:"createEnv",children:[m.jsx("b",{children:"Step 1:"})," Create the Anaconda environment (Python 3.10 used as an example)"]}),command:i.createEnv},activateEnv:{comment:m.jsxs(z,{t:n,i18nKey:"activateEnv",children:[m.jsx("b",{children:"Step 2:"})," Activate the Anaconda environment"]}),command:i.activateEnv},upgradePip:{comment:m.jsxs(z,{t:n,i18nKey:"update",children:[m.jsx("b",{children:"Step 3:"})," Update the Anaconda to latest version"]}),command:i.update},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 4:"})," Download and install the package"]}),command:i.getInstall(e),onCopy:()=>He.install()}};return 
m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...o.createEnv}),m.jsx(b,{...o.activateEnv}),m.jsx(b,{...o.upgradePip}),m.jsx(b,{...o.install})]})},as=({ovPackage:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.download"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),i={[A.ARCHIVE]:e.key===Se.OPENVINO_BASE?n("downloadArchives"):n("downloadArchivesGenAI"),[A.DOCKER]:n("gotoDocker"),[A.SNAP]:n("gotoInstallInstruction")}[t.key],o=m.jsxs(m.Fragment,{children:[n("useFollowingLink"),m.jsx("br",{}),m.jsx("b",{children:m.jsx(we,{href:t.data.downloadLink,testId:"download-button",onClick:()=>He.install(),children:i})})]});return m.jsx(De,{title:r("install"),accent:!0,testId:"instructions",children:m.jsx(b,{comment:o})})},py=({ovPackage:e,version:t,distribution:n})=>{const{t:r}=M("translation",{keyPrefix:"distributions.githubGitee"}),{t:i}=M("translation",{keyPrefix:"selectorForm.titles"}),o={clone:{comment:m.jsxs(z,{t:r,i18nKey:"steps.useGitClone",children:[m.jsx("b",{children:"Step 1:"})," Use Git to clone the OpenVINO toolkit repository"]}),command:n.data.commands.getCloneCommand(e,t),onCopy:()=>He.install()},build:{comment:m.jsxs(z,{t:r,i18nKey:"steps.buildInstructions",children:[m.jsx("b",{children:"Step 2:"})," Follow the ",m.jsx(we,{href:n.data.links.getBuildInstructionsLink(e,t),testId:"build-instructions-link",children:"instructions to build from source"})]})}};return m.jsxs(De,{title:i("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...o.clone}),m.jsx(b,{...o.build})]})},hy=({distribution:e,version:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.npm.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=e.data,o={install:{comment:m.jsx(z,{t:n,i18nKey:"install",children:"Download and install the package"}),command:i.getInstall(t),onCopy:()=>He.install()}};return m.jsx(De,{title:r("install"),accent:!0,testId:"instructions",children:m.jsx(b,{...o.install})})},gy=({ovPackage:e,os:t,version:n,distribution:r})=>{const{t:i}=M("translation",{keyPrefix:"distributions.pip.steps"}),{t:o}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:s}=r.data,l=s.getCreateVenvCommand(t,n),a=s.getActivateVenvCommand(t,n),c=s.getInstallCommand({ovPackage:e,os:t,version:n}),p={createEnv:{comment:m.jsxs(z,{t:i,i18nKey:"createVenv",children:[m.jsx("b",{children:"Step 1:"})," Create virtual environment"]}),command:l},activateEnv:{comment:m.jsxs(z,{t:i,i18nKey:"activateVenv",children:[m.jsx("b",{children:"Step 2:"})," Activate virtual environment"]}),command:a},upgradePip:{comment:m.jsxs(z,{t:i,i18nKey:"upgradePip",children:[m.jsx("b",{children:"Step 3:"})," Upgrade pip to latest version"]}),command:s.upgradeCommand},install:{comment:m.jsxs(z,{t:i,i18nKey:"install",children:[m.jsx("b",{children:"Step 4:"})," Download and install the package"]}),command:c,onCopy:()=>He.install()}};return m.jsxs(De,{title:o("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...p.createEnv}),m.jsx(b,{...p.activateEnv}),m.jsx(b,{...p.upgradePip}),m.jsx(b,{...p.install})]})},my=({distribution:e})=>{const{t}=M("translation",{keyPrefix:"distributions.vcpkg.steps"}),{t:n}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:r}=e.data,i={install:{comment:m.jsx(z,{t,i18nKey:"install",children:"Download and install the package"}),command:r.install,onCopy:()=>He.install()}};return 
m.jsx(De,{title:n("install"),accent:!0,testId:"instructions",children:m.jsx(b,{...i.install})})},vy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.yum.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{yumYear:i}=e.metadata,{commands:o}=t.data,s={createRepo:{comment:m.jsxs(z,{t:n,i18nKey:"createRepoFile",children:[m.jsx("b",{children:"Step 1:"})," Create the YUM repo file in the /tmp directory as a normal user"]}),command:o.getCreateRepoCommand(e)},moveRepoFile:{comment:m.jsxs(z,{t:n,i18nKey:"moveFile",values:{year:i,directory:o.directory},children:[m.jsx("b",{children:"Step 2:"})," Move the new openvino-",{year:i},".repo file to the YUM configuration directory ",m.jsx("b",{children:o.directory})]}),command:o.getMoveRepoFileCommand(e)},verifyRepo:{comment:m.jsxs(z,{t:n,i18nKey:"verify",children:[m.jsx("b",{children:"Step 3:"})," Verify that the new repo is properly setup by running the following command"]}),command:o.verifyRepoCommand},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 4:"})," Install OpenVINO Runtime"]}),command:o.getInstallCommand(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...s.createRepo}),m.jsx(b,{...s.moveRepoFile}),m.jsx(b,{...s.verifyRepo}),m.jsx(b,{...s.install})]})},yy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.zypper.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,o={addRepo:{comment:m.jsxs(z,{t:n,i18nKey:"addRepo",children:[m.jsx("b",{children:"Step 1:"})," Create a ZYPPER repository file with the command below"]}),command:i.addRepo},refresh:{comment:m.jsxs(z,{t:n,i18nKey:"refresh",children:[m.jsx("b",{children:"Step 2:"})," Refresh repositories"]}),command:i.refresh},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 3:"})," Install OpenVINO"]}),command:i.getInstallCommand(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...o.addRepo}),m.jsx(b,{...o.refresh}),m.jsx(b,{...o.install})]})},wy=({state:e})=>{const t={ovPackage:e.PACKAGE.selected,os:e.OP_SYSTEM.selected,version:e.VERSION.selected,distribution:e.DISTRIBUTION.selected};if(t.distribution.key===A.PIP)return m.jsx(gy,{...t,distribution:t.distribution});if(t.distribution.key===A.ARCHIVE)return m.jsx(as,{...t,distribution:t.distribution});if(t.distribution.key===A.DOCKER)return m.jsx(as,{...t,distribution:t.distribution});if(t.distribution.key===A.GITHUB||t.distribution.key===A.GITEE)return m.jsx(py,{...t,distribution:t.distribution});if(t.distribution.key===A.APT)return m.jsx(uy,{...t,distribution:t.distribution});if(t.distribution.key===A.YUM)return m.jsx(vy,{...t,distribution:t.distribution});if(t.distribution.key===A.CONDA)return m.jsx(dy,{...t,distribution:t.distribution});if(t.distribution.key===A.BREW)return m.jsx(cy,{...t,distribution:t.distribution});if(t.distribution.key===A.VCPKG)return m.jsx(my,{...t,distribution:t.distribution});if(t.distribution.key===A.CONAN)return m.jsx(fy,{...t,distribution:t.distribution});if(t.distribution.key===A.NPM)return m.jsx(hy,{...t,distribution:t.distribution});if(t.distribution.key===A.ZYPPER)return m.jsx(yy,{...t,distribution:t.distribution});if(t.distribution.key===A.SNAP)return m.jsx(as,{...t,distribution:t.distribution});const n=t.distribution;throw new Error(`${n}`)};function 
ky(){const{t:e}=M("common",{keyPrefix:"relatedTools"}),{t}=M("translation");return m.jsx(De,{title:t("selectorForm.titles.relatedTools"),testId:"relatedTools",accent:!0,dashed:!0,children:m.jsxs("div",{className:"st-related-tools-links",children:[m.jsx(we,{href:"https://github.com/openvinotoolkit/openvino_notebooks",testId:"notebooks-link",children:e("OpenVINONotebooks")}),m.jsx(we,{href:"https://huggingface.co/docs/optimum/main/intel/openvino/inference",testId:"hf_optimum-link",children:"Hugging Face + Optimum Intel"}),m.jsx("div",{children:m.jsxs(z,{t:e,i18nKey:"tokenizers",children:[m.jsx(we,{href:"https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide/ov-tokenizers.html",testId:"openvino_tokenizers-link",children:"OpenVINO Tokenizers"}),"to streamline tokenizer conversion"]})}),m.jsx("div",{children:m.jsxs(z,{t:e,i18nKey:"nncf",children:[m.jsx(we,{href:"https://docs.openvino.ai/2024/openvino-workflow/model-optimization-guide/compressing-models-during-training.html",testId:"nncf-link",children:"NNCF"}),"for implementing compression algorithms on models"]})}),m.jsx("div",{children:m.jsxs(z,{t:e,i18nKey:"ovms",children:[m.jsx(we,{href:"https://docs.openvino.ai/2024/ovms_what_is_openvino_model_server.html",testId:"ovms-link",children:"OVMS"}),"for serving models optimized for deployment"]})})]})})}function Sy({state:e}){const t=e.PACKAGE.selected,n=e.DISTRIBUTION.selected,r=e.VERSION.selected,{t:i}=M("translation",{keyPrefix:"selectorForm.titles"}),{t:o}=M("common",{keyPrefix:"resources"});let s=m.jsx(m.Fragment,{});if(A.GITHUB===n.key||A.GITEE===n.key){const l=n.key===A.GITHUB?t.key===Se.OPENVINO_BASE?o("githubRepository"):o("githubGenAIRepository"):t.key===Se.OPENVINO_BASE?o("giteeRepository"):o("giteeGenAIRepository");s=m.jsxs(m.Fragment,{children:[m.jsx(we,{href:n.data.links.getBuildInstructionsLink(t,r),testId:"install-instructions-link",children:o("installationInstructions")}),m.jsx(we,{href:n.data.links.getRepositoryLink(t,r),testId:"repository-link",children:l})]})}else s=m.jsx(we,{href:n.data.linksSet.installation,testId:"install-instructions-link",children:o("installationInstructions")});return m.jsx(De,{title:i("resources"),testId:"resources",accent:!0,children:m.jsxs("div",{className:"st-resources-links",children:[m.jsxs("div",{children:[s,m.jsx(we,{href:"https://github.com/openvinotoolkit/openvino/releases",testId:"previous-releases-link",children:o("prevReleases")}),m.jsx(we,{href:r.metadata.systemRequirementsLink,testId:"system-requirements-link",children:o("systemRequirements")})]}),m.jsxs("div",{children:[m.jsx(we,{href:r.metadata.getStartedLink,testId:"get-started-link",children:o("getStarted")}),m.jsx(we,{href:r.metadata.troubleshootingLink,testId:"troubleshooting-link",children:o("troubleshooting")})]})]})})}const sn={toggleButton:"spark-button spark-button-size-l spark-focus-visible spark-focus-visible-self spark-focus-visible-snap",toggleButtonGroup:"spark-button-group spark-button-group-orientation-horizontal spark-button-group-align-start spark-button-group-spacing-l",actionButton:"spark-button-action",secondaryButton:"spark-button-secondary",disabledButton:"spark-button-disabled",buttonContent:"spark-button-content",fontXs:"spark-font-25"},xy=({onClick:e,checked:t=!1,disabled:n=!1,title:r,subtitle:i,value:o})=>m.jsx("button",{className:`${sn.toggleButton} ${t?sn.actionButton:sn.secondaryButton} ${n&&sn.disabledButton}`,type:"button",role:"radio","aria-checked":t,onClick:()=>e==null?void 
0:e(),"data-cy":o,"aria-label":r,children:m.jsxs("span",{className:sn.buttonContent,children:[m.jsx("span",{className:"title",children:r}),i&&m.jsx("span",{className:`${sn.fontXs} subtitle`,children:i})]})}),Oy=({children:e,className:t})=>m.jsx("div",{className:`option-button-group ${t||""} ${sn.toggleButtonGroup}`,children:e});function ki({title:e,options:t,level:n}){const r=F.useContext(zd),i=t.map(({level:o,key:s,checked:l,metadata:a})=>m.jsx(xy,{value:`${o}_${s}`,checked:l,title:a.title,subtitle:a.subtitle,onClick:()=>r(o,s)},s));return m.jsx(De,{title:e,testId:n,children:m.jsx(Oy,{children:i})})}function Py({state:e}){const t=e.PACKAGE.nodes,n=e.VERSION.nodes,r=e.OP_SYSTEM.nodes,i=e.DISTRIBUTION.nodes;F.useEffect(()=>He.combinationView(),[e]);const{t:o}=M("translation",{keyPrefix:"selectorForm.titles"});return m.jsxs(m.Fragment,{children:[m.jsx(ki,{title:o("package"),options:t,level:T.PACKAGE}),m.jsx(ki,{title:o("version"),options:n,level:T.VERSION}),m.jsx(ki,{title:o("os"),options:r,level:T.OP_SYSTEM}),m.jsx(ki,{title:o("distribution"),options:i,level:T.DISTRIBUTION})]})}const{SelectorContext:zd,useSelector:Ny}=qv();He.initialize(window.parent);function _y(){const[e,t]=Ny();return m.jsx("div",{className:`st-responsive-container ${Fd?"idz-page":""}`,children:m.jsxs(zd.Provider,{value:t,children:[m.jsx(Py,{state:e}),m.jsx(wy,{state:e}),m.jsx(Sy,{state:e}),m.jsx(ky,{})]})})}ds.createRoot(document.getElementById("root")).render(m.jsx(np.StrictMode,{children:m.jsx(_y,{})})); +EOF`,getMoveRepoFileCommand:e=>`sudo mv /tmp/openvino-${e.metadata.yumYear}.repo ${Zu}`,verifyRepoCommand:"yum repolist | grep -i openvino",getInstallCommand:e=>`sudo yum install openvino-${e.metadata.yumVersion}`};class Sv extends Ae{constructor(t){super({level:T.DISTRIBUTION,key:A.ZYPPER,metadata:{title:"ZYPPER",subtitle:de("distributions.CAPIOnly")}}),this._data=t}get data(){return{...this._data,commands:xv}}}const xv={addRepo:"sudo zypper addrepo https://download.opensuse.org/repositories/science/openSUSE_Tumbleweed/science.repo",refresh:"sudo zypper refresh",getInstallCommand:({metadata:e})=>`sudo zypper install openvino-devel-${e.zypperVersion} openvino-sample-${e.zypperVersion}`};class aa extends Xr{constructor(t,n,r){super({level:T.OP_SYSTEM,key:t,metadata:n},r),this._setDefaultOS()}_setDefaultOS(){const t=this._detectOS()||Qe.WINDOWS;this.key===t&&this.default()}_detectOS(){const{userAgent:t}=navigator,n={windows:/(Windows|Win)/g,macOS:/(Macintosh|Mac)/g,linux:/(Linux|X11)/g};return n.windows.test(t)?Qe.WINDOWS:n.macOS.test(t)?Qe.MACOS:n.linux.test(t)?Qe.LINUX:null}}class Zr extends aa{constructor(t){super(Qe.WINDOWS,Mm,t)}}class qr extends aa{constructor(t){super(Qe.MACOS,Bm,t)}}class ei extends aa{constructor(t){super(Qe.LINUX,Km,t)}}const Ov=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-archive-linux.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/latest"}),new _e,new Ne]),Pv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).default(),new 
Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-archive-macos.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/latest"}),new _e,new Ne]),Nv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-archive-windows.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/latest"}),new _e,new Ne]),_v=new Cd([Nv,Pv,Ov]),Ev=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_from_archive_linux.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.3/linux"}).includesNPUPlugin(),new Id({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_apt.html"},os:[re.UBUNTU_18,re.UBUNTU_20,re.UBUNTU_22]}),new Td({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_yum.html"}}),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"},downloadLink:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conda.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conan.html"}})]),Cv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_from_archive_macos.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.3/macos"}),new _e,new Ne,new rr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conda.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conan.html"}})]),jv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_from_archive_windows.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.3/windows"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"},downloadLink:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conda.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conan.html"}})]),Iv=new Xm([jv,Cv,Ev]),Lv=new ei([new 
Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-archive-linux.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/linux"}).includesNPUPlugin(),new Id({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-apt.html"},os:[re.UBUNTU_20,re.UBUNTU_22,re.UBUNTU_24]}),new Td({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-yum.html"}}),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conda.html"}}),new Ld({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-brew.html"}}),new la({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-vcpkg.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conan.html"}}),new sa({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-npm.html"}}),new Sv({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-zypper.html"}}),new yv({linksSet:{installation:"https://docs.openvino.ai/2024/openvino-workflow/deployment-locally/integrate-openvino-with-ubuntu-snap.html"},downloadLink:"https://docs.openvino.ai/2024/openvino-workflow/deployment-locally/integrate-openvino-with-ubuntu-snap.html"})]),Rv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-archive-macos.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/macos"}),new _e,new Ne,new rr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conda.html"}}),new Ld({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-brew.html"}}),new la({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-vcpkg.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conan.html"}}),new sa({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-npm.html"}})]),Tv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-archive-windows.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/windows"}).includesNPUPlugin(),new _e,new Ne,new 
ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conda.html"}}),new la({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-vcpkg.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conan.html"}}),new sa({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-npm.html"}})]),Av=new jd([Tv,Rv,Lv]),Dv=new Gm([Av.default(),_v,Iv]),Uv=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#pypi-installation"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#archive-installation"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/nightly/latest"}),new _e,new Ne]),Fv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#pypi-installation"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#archive-installation"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/nightly/latest"}),new _e,new Ne]),zv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#pypi-installation"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#archive-installation"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/nightly/latest"}),new _e,new Ne]),Vv=new Cd([zv,Fv,Uv]),bv=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/linux"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"})]),$v=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/macos"}),new _e,new Ne]),Mv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new 
Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/windows"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"})]),Bv=new jd([Mv,$v,bv]),Kv=new Wm([Bv.default(),Vv]),Hv=new Xr({level:T.ROOT,key:_d.ROOT,metadata:{title:"ROOT"}},[Dv.default(),Kv]).default();function Gv(e,t){var i,o;if(t.key===A.DOCKER||!t.footnoteLevel)return e;const n=(i=e[t.footnoteLevel])==null?void 0:i.selected,r=(o=e[t.footnoteLevel])==null?void 0:o.nodes;return!n||!r||((Array.isArray(n)?[...n,...r]:[n]).forEach(s=>s.hasFootnote=!0),r.forEach(s=>s.checked&&(s.hasFootnote=!0))),e}class Wv{constructor(){ze(this,"_root",Hv)}getState(){try{return this._getState()}catch(t){return console.error(t),this._selectDefaults(this._root),this._getState()}}_getState(){const t=this._root.children,n=this._get_selected(t),r=n.children,i=this._get_selected(r),{systems:o,system:s}=this._processVersion(i),l=s.children,a=this._get_selected(l),c={[T.PACKAGE]:{nodes:t.map(p=>p.toOption()),selected:n.toOption()},[T.VERSION]:{nodes:r.map(p=>p.toOption()),selected:i.toOption()},[T.OP_SYSTEM]:{nodes:o.map(p=>p.toOption()),selected:s.toOption()},[T.DISTRIBUTION]:{nodes:l.map(p=>p.toOption()),selected:a.toOption()}};return Gv(c,a)}_get_selected(t){t.some(({checked:r})=>r)||this._selectDefaultsForLevel(t[0].level);const n=t.find(({checked:r})=>r);if(!n)throw new Error("Not valid tree");return n}_processVersion(t){const n=t.children,r=this._get_selected(n);return{systems:n,system:r}}setState(t){this._setState(t)}_setState(t,n=this._root){if(!n.children.length)return;const r=n.children[0].level,i=Yv(t[r]);n.children.forEach(o=>o.checked=i.includes(o.key)),n.children.forEach(o=>this._setState(t,o))}select(t,n){return this._select(t,n),this.getState()}_select(t,n,r=this._root){var i;if(((i=r.children[0])==null?void 0:i.level)!==t){r.children.forEach(o=>this._select(t,n,o));return}if(r.childrenSelector){r.childrenSelector(r.children,n);return}r.children.forEach(o=>o.checked=o.key===n)}_selectDefaultsForLevel(t,n=this._root){if(n.children.length){if(n.children[0].level!==t){n.children.forEach(r=>this._selectDefaultsForLevel(t,r));return}this._selectDefaults(n)}}_selectDefaults(t){t.children.forEach(n=>{n.checked=n.isDefault,this._selectDefaults(n)})}}const _n=new Wv;function Yv(e){const t=[];return Array.isArray(e)?t.push(...e):e&&t.push(e),t}function Ad(e,{serializeVersion:t}={serializeVersion:!0}){const n=[[T.PACKAGE,e.PACKAGE.selected.key],[T.VERSION,t?e.VERSION.selected.key:null],[T.OP_SYSTEM,e.OP_SYSTEM.selected.key],[T.DISTRIBUTION,e.DISTRIBUTION.selected.key]],r=new URLSearchParams;for(const[i,o]of n)o&&r.set(i,o);return r}function Dd(e){function t(n,r){const i=e.get(n);if(!i)throw new Error(`Cannot extract value for: ${n}`);if(!r[i])throw new Error(`Bad node key for: ${n}`);return r[i]}try{return{[T.PACKAGE]:t(T.PACKAGE,Se),[T.VERSION]:e.has(T.VERSION)?t(T.VERSION,wn):null,[T.OP_SYSTEM]:t(T.OP_SYSTEM,Qe),[T.DISTRIBUTION]:t(T.DISTRIBUTION,A)}}catch(n){return console.log(`Cannot restore state from url due to error "${n}"`),null}}function Qv(){const e=window.parent;if(!e.location.search)return null;const t=new URLSearchParams(e.location.search);return Dd(t)}function 
Jv(e,t,n,{serializeVersion:r}={serializeVersion:!0}){F.useEffect(()=>{const i=window.parent,o=Ad(t,{serializeVersion:r}).toString(),s=new URL(i.location.toString());if(!s.search){s.search=o,i.history.replaceState(null,"",s);return}s.search.slice(1)!==o&&(s.search=o,i.history.pushState(null,"",s))}),parent.onpopstate=()=>{const i=window.parent,o=new URLSearchParams(i.location.search),s=Dd(o);s&&(e.setState(s),n(e.getState()))}}const os=function(e){let t,n=!1;return function(...r){return n||(t=e(r),n=!0),t}};function Xv(e){var t,n;return typeof((n=(t=e.wap_tms)==null?void 0:t.custom)==null?void 0:n.trackComponentClick)!="function"?null:e.wap_tms.custom.trackComponentClick.bind(e.wap_tms.custom)}class Zv{constructor(){ze(this,"_window");ze(this,"_consoleNotification",{notInitialized:os(()=>console.log("Adobe analytics is not initialized")),notFound:os(()=>console.log("Adobe analytics not found on a page")),devMode:os(()=>console.log("Analytics in dev mode"))});ze(this,"_send",t=>{if(!this._window){this._consoleNotification.notInitialized();return}const n=Ad(_n.getState()).toString(),r=Xv(this._window);if(!r){this._consoleNotification.notFound();return}try{r(t,n)}catch(i){console.error(i)}})}initialize(t){this._window=t}install(){this._send("install")}combinationView(){this._send("combination-view")}}const He=new Zv;function qv(){const e=Qv();e&&_n.setState(e);const t=F.createContext((r,i)=>{_n.select(r,i)});function n(){const[r,i]=F.useState(_n.getState());return Jv(_n,r,i),[r,(o,s)=>i(_n.select(o,s))]}return{SelectorContext:t,useSelector:n}}async function ey(e){e&&(navigator.clipboard?await navigator.clipboard.writeText(e):ty(e))}function ty(e){const t=ny(e);document.body.append(t),t.select(),document.execCommand("copy"),t.remove()}function ny(e){const t=document.createElement("textarea");t.style.fontSize="12pt",t.style.border="0",t.style.padding="0",t.style.margin="0",t.style.position="absolute",t.style.left="-9999px";const n=window.pageYOffset||document.documentElement.scrollTop;return t.style.top=`${n}px`,t.setAttribute("readonly",""),t.value=e,t}function ry(){return m.jsxs("svg",{version:"1.1",width:"24",height:"24",viewBox:"0 0 205 205",xmlns:"http://www.w3.org/2000/svg",className:"svg-icon",children:[m.jsx("path",{fill:"none",stroke:"currentColor",strokeWidth:"10",d:"M 50 145 a 15 15 0 0 1 -15 -15 v -90 a 15 15 0 0 1 15 -15 h 70 a 15 15 0 0 1 15 15 v 5"}),m.jsx("rect",{x:"65",y:"60",width:"100",height:"120",rx:"15",fill:"none",stroke:"currentColor",strokeWidth:"10"})]})}function iy(){return m.jsx("svg",{version:"1.1",width:"24",height:"24",viewBox:"0 0 200 200",xmlns:"http://www.w3.org/2000/svg",className:"svg-icon",children:m.jsx("path",{strokeLinejoin:"round",strokeLinecap:"round",fill:"none",stroke:"currentColor",strokeWidth:"15",d:"M 40 100 L 90 150 L 170 40"})})}const b=({comment:e,command:t,onCopy:n})=>{const[r,i]=F.useState(!1),o=async()=>{r||(await ey(t),i(!0),setTimeout(()=>i(!1),1500),n==null||n())};return m.jsxs("div",{className:"st-code-snippet","data-cy":"instructions-step",children:[e&&m.jsx(Ud,{children:e}),m.jsxs("div",{"data-cy":"command",children:[t&&m.jsx("code",{className:"st-code-snippet-content",children:t}),t&&m.jsx("button",{className:"copy-button",type:"button","aria-label":"Copy","data-cy":"copy",onClick:o,children:r?m.jsx(iy,{}):m.jsx(ry,{})})]})]})},Ud=({children:e})=>m.jsxs("pre",{className:"st-code-snippet-comment",children:["# 
",e]}),oy=({comment:e,snippets:t})=>m.jsxs("div",{className:"st-code-snippet-multi-line","data-cy":"command",children:[e&&m.jsx(Ud,{children:e}),t.map(n=>m.jsx(b,{...n},n.command))]});function sy(e){return e.host==="docs.openvino.ai"}const ss="production.docs.en",Fd=(ss==null?void 0:ss.includes("idz"))||!1,ls={link:"spark-hyperlink spark-hyperlink-primary spark-hyperlink-standard spark-focus-visible spark-focus-visible-self spark-focus-visible-snap spark-focus-visible-background",button:"spark-button spark-button-action spark-button-size-m spark-focus-visible spark-focus-visible-self spark-focus-visible-snap",buttonContent:"spark-button-content"},we=({href:e,children:t,type:n="link",testId:r="link",onClick:i})=>{const o=!Fd&&sy(new URL(e))?"_parent":"_blank";return n==="link"?m.jsx("a",{href:e,className:ls.link,target:o,rel:"noreferrer noopener","data-cy":r,onClick:()=>i==null?void 0:i(),children:t}):m.jsx("span",{className:ls.button,children:m.jsx("span",{className:ls.buttonContent,children:m.jsx("a",{href:e,target:o,rel:"noreferrer noopener","data-cy":r,onClick:()=>i==null?void 0:i(),children:t})})})},ly={heading:"spark-heading spark-font-200"},De=({title:e,accent:t=!1,dashed:n=!1,children:r,testId:i})=>m.jsxs("div",{className:`st-section ${t?"st-section-accent":""} ${n?"st-section-dashed":""}`,"data-cy":i,children:[m.jsx("span",{className:`st-section-title ${ly.heading}`,children:e}),m.jsx("div",{className:"st-section-content",children:F.Children.map(r,o=>m.jsx(ay,{children:o}))})]}),ay=({children:e})=>m.jsx("div",{className:"st-section-content-row",children:e}),uy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.apt.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,o={comment:m.jsxs(z,{ns:"translation",i18nKey:"distributions.apt.steps.addRepository",children:[m.jsx("b",{children:"Step 3:"})," Add the repository via the following command"]}),snippets:i.getAddRepositoryCommands(e,t.data.os).map(({ubuntuVersionNumber:l,command:a})=>({comment:`Ubuntu ${l}`,command:a}))},s={downloadKey:{comment:m.jsxs(z,{t:n,i18nKey:"download",values:{filename:i.keyFilename},children:[m.jsx("b",{children:"Step 1:"})," Download the ",m.jsx(we,{href:i.keyHref,children:i.keyFilename}),". You can also use the following command"]}),command:i.downloadKeyCommand},addKey:{comment:m.jsxs(z,{t:n,i18nKey:"addKey",children:[m.jsx("b",{children:"Step 2:"})," Add this key to the system keyring"]}),command:i.addKeyCommand},addRepository:o,updatePackages:{comment:m.jsxs(z,{t:n,i18nKey:"updateList",children:[m.jsx("b",{children:"Step 4:"})," Update the list of packages via the update command"]}),command:i.updatePackagesCommand},verifyAptCache:{comment:m.jsxs(z,{t:n,i18nKey:"verify",children:[m.jsx("b",{children:"Step 5:"})," Verify that the APT repository is properly set up. 
Run the apt-cache command to see a list of all available OpenVINO packages and components"]}),command:i.verifyAptCacheCommand},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 6:"})," Install OpenVINO Runtime"]}),command:i.getInstallCommand(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...s.downloadKey}),m.jsx(b,{...s.addKey}),m.jsx(oy,{...s.addRepository}),m.jsx(b,{...s.updatePackages}),m.jsx(b,{...s.verifyAptCache}),m.jsx(b,{...s.install})]})},cy=({distribution:e})=>{const{t}=M("translation",{keyPrefix:"distributions.brew.steps"}),{t:n}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:r}=e.data,i={install:{comment:m.jsx(z,{t,i18nKey:"install",children:"Download and install the package"}),command:r.install,onCopy:()=>He.install()}};return m.jsx(De,{title:n("install"),accent:!0,testId:"instructions",children:m.jsx(b,{...i.install})})},fy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.conan.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,{txtFilename:o,cmakeFilename:s}=i,l={createConanFile:{comment:m.jsxs(z,{t:n,i18nKey:"createConanFile",values:{txtFilename:o},children:[m.jsx("b",{children:"Step 1:"})," Create a ",m.jsx("b",{children:o})," file for your OpenVINO project and add “openvino” dependency in there"]}),command:i.conanTXTContent(e)},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",values:{cmakeFilename:s},children:[m.jsx("b",{children:"Step 2:"})," Run the command below to create ",m.jsx("b",{children:s})," file, which will be used to compile your project with OpenVINO"]}),command:i.install,onCopy:()=>He.install()},compile:{comment:m.jsxs(z,{t:n,i18nKey:"compile",children:[m.jsx("b",{children:"Step 3:"})," Configure and compile your project with OpenVINO"]}),command:i.compile}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...l.createConanFile}),m.jsx(b,{...l.install}),m.jsx(b,{...l.compile})]})},dy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.conda.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,o={createEnv:{comment:m.jsxs(z,{t:n,i18nKey:"createEnv",children:[m.jsx("b",{children:"Step 1:"})," Create the Anaconda environment (Python 3.10 used as an example)"]}),command:i.createEnv},activateEnv:{comment:m.jsxs(z,{t:n,i18nKey:"activateEnv",children:[m.jsx("b",{children:"Step 2:"})," Activate the Anaconda environment"]}),command:i.activateEnv},upgradePip:{comment:m.jsxs(z,{t:n,i18nKey:"update",children:[m.jsx("b",{children:"Step 3:"})," Update the Anaconda to latest version"]}),command:i.update},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 4:"})," Download and install the package"]}),command:i.getInstall(e),onCopy:()=>He.install()}};return 
m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...o.createEnv}),m.jsx(b,{...o.activateEnv}),m.jsx(b,{...o.upgradePip}),m.jsx(b,{...o.install})]})},as=({ovPackage:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.download"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),i={[A.ARCHIVE]:e.key===Se.OPENVINO_BASE?n("downloadArchives"):n("downloadArchivesGenAI"),[A.DOCKER]:n("gotoDocker"),[A.SNAP]:n("gotoInstallInstruction")}[t.key],o=m.jsxs(m.Fragment,{children:[n("useFollowingLink"),m.jsx("br",{}),m.jsx("b",{children:m.jsx(we,{href:t.data.downloadLink,testId:"download-button",onClick:()=>He.install(),children:i})})]});return m.jsx(De,{title:r("install"),accent:!0,testId:"instructions",children:m.jsx(b,{comment:o})})},py=({ovPackage:e,version:t,distribution:n})=>{const{t:r}=M("translation",{keyPrefix:"distributions.githubGitee"}),{t:i}=M("translation",{keyPrefix:"selectorForm.titles"}),o={clone:{comment:m.jsxs(z,{t:r,i18nKey:"steps.useGitClone",children:[m.jsx("b",{children:"Step 1:"})," Use Git to clone the OpenVINO toolkit repository"]}),command:n.data.commands.getCloneCommand(e,t),onCopy:()=>He.install()},build:{comment:m.jsxs(z,{t:r,i18nKey:"steps.buildInstructions",children:[m.jsx("b",{children:"Step 2:"})," Follow the ",m.jsx(we,{href:n.data.links.getBuildInstructionsLink(e,t),testId:"build-instructions-link",children:"instructions to build from source"})]})}};return m.jsxs(De,{title:i("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...o.clone}),m.jsx(b,{...o.build})]})},hy=({distribution:e,version:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.npm.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=e.data,o={install:{comment:m.jsx(z,{t:n,i18nKey:"install",children:"Download and install the package"}),command:i.getInstall(t),onCopy:()=>He.install()}};return m.jsx(De,{title:r("install"),accent:!0,testId:"instructions",children:m.jsx(b,{...o.install})})},gy=({ovPackage:e,os:t,version:n,distribution:r})=>{const{t:i}=M("translation",{keyPrefix:"distributions.pip.steps"}),{t:o}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:s}=r.data,l=s.getCreateVenvCommand(t,n),a=s.getActivateVenvCommand(t,n),c=s.getInstallCommand({ovPackage:e,os:t,version:n}),p={createEnv:{comment:m.jsxs(z,{t:i,i18nKey:"createVenv",children:[m.jsx("b",{children:"Step 1:"})," Create virtual environment"]}),command:l},activateEnv:{comment:m.jsxs(z,{t:i,i18nKey:"activateVenv",children:[m.jsx("b",{children:"Step 2:"})," Activate virtual environment"]}),command:a},upgradePip:{comment:m.jsxs(z,{t:i,i18nKey:"upgradePip",children:[m.jsx("b",{children:"Step 3:"})," Upgrade pip to latest version"]}),command:s.upgradeCommand},install:{comment:m.jsxs(z,{t:i,i18nKey:"install",children:[m.jsx("b",{children:"Step 4:"})," Download and install the package"]}),command:c,onCopy:()=>He.install()}};return m.jsxs(De,{title:o("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...p.createEnv}),m.jsx(b,{...p.activateEnv}),m.jsx(b,{...p.upgradePip}),m.jsx(b,{...p.install})]})},my=({distribution:e})=>{const{t}=M("translation",{keyPrefix:"distributions.vcpkg.steps"}),{t:n}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:r}=e.data,i={install:{comment:m.jsx(z,{t,i18nKey:"install",children:"Download and install the package"}),command:r.install,onCopy:()=>He.install()}};return 
m.jsx(De,{title:n("install"),accent:!0,testId:"instructions",children:m.jsx(b,{...i.install})})},vy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.yum.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{yumYear:i}=e.metadata,{commands:o}=t.data,s={createRepo:{comment:m.jsxs(z,{t:n,i18nKey:"createRepoFile",children:[m.jsx("b",{children:"Step 1:"})," Create the YUM repo file in the /tmp directory as a normal user"]}),command:o.getCreateRepoCommand(e)},moveRepoFile:{comment:m.jsxs(z,{t:n,i18nKey:"moveFile",values:{year:i,directory:o.directory},children:[m.jsx("b",{children:"Step 2:"})," Move the new openvino-",{year:i},".repo file to the YUM configuration directory ",m.jsx("b",{children:o.directory})]}),command:o.getMoveRepoFileCommand(e)},verifyRepo:{comment:m.jsxs(z,{t:n,i18nKey:"verify",children:[m.jsx("b",{children:"Step 3:"})," Verify that the new repo is properly setup by running the following command"]}),command:o.verifyRepoCommand},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 4:"})," Install OpenVINO Runtime"]}),command:o.getInstallCommand(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...s.createRepo}),m.jsx(b,{...s.moveRepoFile}),m.jsx(b,{...s.verifyRepo}),m.jsx(b,{...s.install})]})},yy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.zypper.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,o={addRepo:{comment:m.jsxs(z,{t:n,i18nKey:"addRepo",children:[m.jsx("b",{children:"Step 1:"})," Create a ZYPPER repository file with the command below"]}),command:i.addRepo},refresh:{comment:m.jsxs(z,{t:n,i18nKey:"refresh",children:[m.jsx("b",{children:"Step 2:"})," Refresh repositories"]}),command:i.refresh},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 3:"})," Install OpenVINO"]}),command:i.getInstallCommand(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...o.addRepo}),m.jsx(b,{...o.refresh}),m.jsx(b,{...o.install})]})},wy=({state:e})=>{const t={ovPackage:e.PACKAGE.selected,os:e.OP_SYSTEM.selected,version:e.VERSION.selected,distribution:e.DISTRIBUTION.selected};if(t.distribution.key===A.PIP)return m.jsx(gy,{...t,distribution:t.distribution});if(t.distribution.key===A.ARCHIVE)return m.jsx(as,{...t,distribution:t.distribution});if(t.distribution.key===A.DOCKER)return m.jsx(as,{...t,distribution:t.distribution});if(t.distribution.key===A.GITHUB||t.distribution.key===A.GITEE)return m.jsx(py,{...t,distribution:t.distribution});if(t.distribution.key===A.APT)return m.jsx(uy,{...t,distribution:t.distribution});if(t.distribution.key===A.YUM)return m.jsx(vy,{...t,distribution:t.distribution});if(t.distribution.key===A.CONDA)return m.jsx(dy,{...t,distribution:t.distribution});if(t.distribution.key===A.BREW)return m.jsx(cy,{...t,distribution:t.distribution});if(t.distribution.key===A.VCPKG)return m.jsx(my,{...t,distribution:t.distribution});if(t.distribution.key===A.CONAN)return m.jsx(fy,{...t,distribution:t.distribution});if(t.distribution.key===A.NPM)return m.jsx(hy,{...t,distribution:t.distribution});if(t.distribution.key===A.ZYPPER)return m.jsx(yy,{...t,distribution:t.distribution});if(t.distribution.key===A.SNAP)return m.jsx(as,{...t,distribution:t.distribution});const n=t.distribution;throw new Error(`${n}`)};function 
ky(){const{t:e}=M("common",{keyPrefix:"relatedTools"}),{t}=M("translation");return m.jsx(De,{title:t("selectorForm.titles.relatedTools"),testId:"relatedTools",accent:!0,dashed:!0,children:m.jsxs("div",{className:"st-related-tools-links",children:[m.jsx(we,{href:"https://github.com/openvinotoolkit/openvino_notebooks",testId:"notebooks-link",children:e("OpenVINONotebooks")}),m.jsx(we,{href:"https://huggingface.co/docs/optimum/main/intel/openvino/inference",testId:"hf_optimum-link",children:"Hugging Face + Optimum Intel"}),m.jsx("div",{children:m.jsxs(z,{t:e,i18nKey:"tokenizers",children:[m.jsx(we,{href:"https://docs.openvino.ai/2024/openvino-workflow-generative/ov-tokenizers.html",testId:"openvino_tokenizers-link",children:"OpenVINO Tokenizers"}),"to streamline tokenizer conversion"]})}),m.jsx("div",{children:m.jsxs(z,{t:e,i18nKey:"nncf",children:[m.jsx(we,{href:"https://docs.openvino.ai/2024/openvino-workflow/model-optimization-guide/compressing-models-during-training.html",testId:"nncf-link",children:"NNCF"}),"for implementing compression algorithms on models"]})}),m.jsx("div",{children:m.jsxs(z,{t:e,i18nKey:"ovms",children:[m.jsx(we,{href:"https://docs.openvino.ai/2024/ovms_what_is_openvino_model_server.html",testId:"ovms-link",children:"OVMS"}),"for serving models optimized for deployment"]})})]})})}function Sy({state:e}){const t=e.PACKAGE.selected,n=e.DISTRIBUTION.selected,r=e.VERSION.selected,{t:i}=M("translation",{keyPrefix:"selectorForm.titles"}),{t:o}=M("common",{keyPrefix:"resources"});let s=m.jsx(m.Fragment,{});if(A.GITHUB===n.key||A.GITEE===n.key){const l=n.key===A.GITHUB?t.key===Se.OPENVINO_BASE?o("githubRepository"):o("githubGenAIRepository"):t.key===Se.OPENVINO_BASE?o("giteeRepository"):o("giteeGenAIRepository");s=m.jsxs(m.Fragment,{children:[m.jsx(we,{href:n.data.links.getBuildInstructionsLink(t,r),testId:"install-instructions-link",children:o("installationInstructions")}),m.jsx(we,{href:n.data.links.getRepositoryLink(t,r),testId:"repository-link",children:l})]})}else s=m.jsx(we,{href:n.data.linksSet.installation,testId:"install-instructions-link",children:o("installationInstructions")});return m.jsx(De,{title:i("resources"),testId:"resources",accent:!0,children:m.jsxs("div",{className:"st-resources-links",children:[m.jsxs("div",{children:[s,m.jsx(we,{href:"https://github.com/openvinotoolkit/openvino/releases",testId:"previous-releases-link",children:o("prevReleases")}),m.jsx(we,{href:r.metadata.systemRequirementsLink,testId:"system-requirements-link",children:o("systemRequirements")})]}),m.jsxs("div",{children:[m.jsx(we,{href:r.metadata.getStartedLink,testId:"get-started-link",children:o("getStarted")}),m.jsx(we,{href:r.metadata.troubleshootingLink,testId:"troubleshooting-link",children:o("troubleshooting")})]})]})})}const sn={toggleButton:"spark-button spark-button-size-l spark-focus-visible spark-focus-visible-self spark-focus-visible-snap",toggleButtonGroup:"spark-button-group spark-button-group-orientation-horizontal spark-button-group-align-start spark-button-group-spacing-l",actionButton:"spark-button-action",secondaryButton:"spark-button-secondary",disabledButton:"spark-button-disabled",buttonContent:"spark-button-content",fontXs:"spark-font-25"},xy=({onClick:e,checked:t=!1,disabled:n=!1,title:r,subtitle:i,value:o})=>m.jsx("button",{className:`${sn.toggleButton} ${t?sn.actionButton:sn.secondaryButton} ${n&&sn.disabledButton}`,type:"button",role:"radio","aria-checked":t,onClick:()=>e==null?void 
0:e(),"data-cy":o,"aria-label":r,children:m.jsxs("span",{className:sn.buttonContent,children:[m.jsx("span",{className:"title",children:r}),i&&m.jsx("span",{className:`${sn.fontXs} subtitle`,children:i})]})}),Oy=({children:e,className:t})=>m.jsx("div",{className:`option-button-group ${t||""} ${sn.toggleButtonGroup}`,children:e});function ki({title:e,options:t,level:n}){const r=F.useContext(zd),i=t.map(({level:o,key:s,checked:l,metadata:a})=>m.jsx(xy,{value:`${o}_${s}`,checked:l,title:a.title,subtitle:a.subtitle,onClick:()=>r(o,s)},s));return m.jsx(De,{title:e,testId:n,children:m.jsx(Oy,{children:i})})}function Py({state:e}){const t=e.PACKAGE.nodes,n=e.VERSION.nodes,r=e.OP_SYSTEM.nodes,i=e.DISTRIBUTION.nodes;F.useEffect(()=>He.combinationView(),[e]);const{t:o}=M("translation",{keyPrefix:"selectorForm.titles"});return m.jsxs(m.Fragment,{children:[m.jsx(ki,{title:o("package"),options:t,level:T.PACKAGE}),m.jsx(ki,{title:o("version"),options:n,level:T.VERSION}),m.jsx(ki,{title:o("os"),options:r,level:T.OP_SYSTEM}),m.jsx(ki,{title:o("distribution"),options:i,level:T.DISTRIBUTION})]})}const{SelectorContext:zd,useSelector:Ny}=qv();He.initialize(window.parent);function _y(){const[e,t]=Ny();return m.jsx("div",{className:`st-responsive-container ${Fd?"idz-page":""}`,children:m.jsxs(zd.Provider,{value:t,children:[m.jsx(Py,{state:e}),m.jsx(wy,{state:e}),m.jsx(Sy,{state:e}),m.jsx(ky,{})]})})}ds.createRoot(document.getElementById("root")).render(m.jsx(np.StrictMode,{children:m.jsx(_y,{})})); diff --git a/docs/sphinx_setup/index.rst b/docs/sphinx_setup/index.rst index 94c0332790663a..d0da8fa4244dd6 100644 --- a/docs/sphinx_setup/index.rst +++ b/docs/sphinx_setup/index.rst @@ -28,7 +28,7 @@ hardware and environments, on-premises and on-device, in the browser or in the c
  • New GenAI API

    Generative AI in only a few lines of code!

    - Check out our guide
    + Check out our guide
  • OpenVINO models on Hugging Face!

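For a sense of what the banner's "few lines of code" claim refers to, here is a minimal sketch using the GenAI API (the model directory is a placeholder for any LLM already converted to OpenVINO IR, e.g. with optimum-intel; `pip install openvino-genai` provides the package):

import openvino_genai

# Placeholder path: an LLM previously exported to OpenVINO IR.
pipe = openvino_genai.LLMPipeline("./TinyLlama-1.1B-Chat-ov", "CPU")
print(pipe.generate("What is OpenVINO?", max_new_tokens=64))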
    @@ -194,6 +194,7 @@ Key Features GET STARTED LEARN OPENVINO - OPENVINO WORKFLOW + HOW TO USE - MAIN WORKFLOW + HOW TO USE - GENERATIVE AI WORKFLOW DOCUMENTATION ABOUT OPENVINO \ No newline at end of file diff --git a/src/frontends/tensorflow/src/frontend.cpp b/src/frontends/tensorflow/src/frontend.cpp index af609088679e14..006a4e22e06304 100644 --- a/src/frontends/tensorflow/src/frontend.cpp +++ b/src/frontends/tensorflow/src/frontend.cpp @@ -471,7 +471,7 @@ std::shared_ptr FrontEnd::convert(const ov::frontend::InputModel::Ptr "provides conversion extension(s): " << unsupported_ops_from_tokenizers << ". Install OpenVINO Tokenizers, refer to the documentation: " - "https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide/ov-tokenizers.html \n"; + "https://docs.openvino.ai/2024/openvino-workflow-generative/ov-tokenizers.html \n"; } } From 0848f8630aca8e33bfbf56b68809d81c3a906c21 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Fri, 17 Jan 2025 15:57:06 +0100 Subject: [PATCH 4/4] [PT FE] Improve support for complex data type (#28482) ### Details: - *Remove transformations for FFT* - *Use `ComplexTypeMark` to provide information about a complex type* ### Tickets: - *CVS-159375* --------- Signed-off-by: Maxim Vafin Co-authored-by: Roman Kazantsev --- src/frontends/pytorch/src/frontend.cpp | 9 +- src/frontends/pytorch/src/op/complex.cpp | 84 +++++++ src/frontends/pytorch/src/op/fft.cpp | 208 ++++++++++++++++++ src/frontends/pytorch/src/op/permute.cpp | 35 ++- src/frontends/pytorch/src/op/reshape.cpp | 26 ++- src/frontends/pytorch/src/op/size.cpp | 23 +- src/frontends/pytorch/src/op/stft.cpp | 9 +- src/frontends/pytorch/src/op_table.cpp | 21 +- .../transforms/irfftn_complex_replacer.cpp | 164 -------------- .../transforms/irfftn_complex_replacer.hpp | 24 -- .../src/transforms/rfftn_complex_replacer.cpp | 163 -------------- .../src/transforms/rfftn_complex_replacer.hpp | 24 -- src/frontends/pytorch/src/utils.cpp | 24 +- src/frontends/pytorch/src/utils.hpp | 4 +- .../layer_tests/pytorch_tests/test_permute.py | 43 ++-- .../layer_tests/pytorch_tests/test_reshape.py | 44 ++-- tests/layer_tests/pytorch_tests/test_size.py | 30 ++- tests/layer_tests/pytorch_tests/test_stft.py | 12 +- 18 files changed, 497 insertions(+), 450 deletions(-) create mode 100644 src/frontends/pytorch/src/op/complex.cpp create mode 100644 src/frontends/pytorch/src/op/fft.cpp delete mode 100644 src/frontends/pytorch/src/transforms/irfftn_complex_replacer.cpp delete mode 100644 src/frontends/pytorch/src/transforms/irfftn_complex_replacer.hpp delete mode 100644 src/frontends/pytorch/src/transforms/rfftn_complex_replacer.cpp delete mode 100644 src/frontends/pytorch/src/transforms/rfftn_complex_replacer.hpp diff --git a/src/frontends/pytorch/src/frontend.cpp b/src/frontends/pytorch/src/frontend.cpp index 04ba9a9c92c281..bb69e8fa313130 100644 --- a/src/frontends/pytorch/src/frontend.cpp +++ b/src/frontends/pytorch/src/frontend.cpp @@ -30,7 +30,6 @@ #include "transforms/dict_resolver.hpp" #include "transforms/einsum_list_construct.hpp" #include "transforms/index_loop_getitem_replacer.hpp" -#include "transforms/irfftn_complex_replacer.hpp" #include "transforms/listconstruct_replacer.hpp" #include "transforms/min_max_prim_list_construct_replacer.hpp" #include "transforms/prim_list_construct_pad.hpp" @@ -40,7 +39,6 @@ #include "transforms/quantized_node_remover.hpp" #include "transforms/remove_packing_ops.hpp" #include "transforms/reverseprop_resolver.hpp" -#include "transforms/rfftn_complex_replacer.hpp" #include 
"transforms/softmax_reshape_elimination.hpp" #include "transforms/string_equality_replacer.hpp" #include "transforms/torchfx_gptq_pattern_replacer.hpp" @@ -69,6 +67,11 @@ std::map get_unconverted_types_from_model(const std::s if (!unconverted_ops_types.count(op_type_it->second)) { unconverted_ops_types.emplace(op_type_it->second, std::move(exception_msg)); } + } else if (const auto& fw_node = ov::as_type_ptr(node)) { + auto op_type = std::string(fw_node->get_type_name()); + if (!unconverted_ops_types.count(op_type)) { + unconverted_ops_types.emplace(op_type, "This is OpenVINO internal type."); + } } if (const auto& fw_node = ov::as_type_ptr(node)) { for (size_t i = 0; i < fw_node->get_internal_subgraphs_size(); ++i) { @@ -283,8 +286,6 @@ void FrontEnd::normalize(const std::shared_ptr& model) const { manager.register_pass(); manager.register_pass(); manager.register_pass(); - manager.register_pass(); - manager.register_pass(); manager.register_pass(); manager.register_pass(); manager.register_pass(); diff --git a/src/frontends/pytorch/src/op/complex.cpp b/src/frontends/pytorch/src/op/complex.cpp new file mode 100644 index 00000000000000..8ec0f5435e358b --- /dev/null +++ b/src/frontends/pytorch/src/op/complex.cpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/complex_type_mark.hpp" +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/split.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/unsqueeze.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +using namespace ov::op; + +OutputVector translate_complex(const NodeContext& context) { + num_inputs_check(context, 2, 2); + auto real = context.get_input(0); + auto imag = context.get_input(1); + + auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + real = context.mark_node(std::make_shared(real, const_neg_1)); + imag = context.mark_node(std::make_shared(imag, const_neg_1)); + + auto complex = context.mark_node(std::make_shared(OutputVector{real, imag}, -1)); + + return {context.mark_node(std::make_shared(complex, complex->get_element_type()))}; +}; + +OutputVector translate_imag(const NodeContext& context) { + num_inputs_check(context, 1, 1, true); + auto complex = context.get_input(0); + + auto complex_type_mark = as_type_ptr(complex.get_node_shared_ptr()); + PYTORCH_OP_CONVERSION_CHECK(complex_type_mark, "aten::imag operation expects complex type tensor on input."); + + complex = complex_type_mark->input_value(0); + auto axis = context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + auto imag = context.mark_node(std::make_shared(complex, axis, 2))->output(1); + + auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + return {context.mark_node(std::make_shared(imag, const_neg_1))}; +}; + +OutputVector translate_real(const NodeContext& context) { + num_inputs_check(context, 1, 1, true); + auto complex = context.get_input(0); + + auto complex_type_mark = as_type_ptr(complex.get_node_shared_ptr()); + PYTORCH_OP_CONVERSION_CHECK(complex_type_mark, "aten::real operation expects complex type tensor on input."); + + complex = complex_type_mark->input_value(0); + auto axis = context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + auto real = context.mark_node(std::make_shared(complex, axis, 2))->output(0); + + auto const_neg_1 = 
context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + return {context.mark_node(std::make_shared(real, const_neg_1))}; +}; + +OutputVector translate_view_as_real(const NodeContext& context) { + num_inputs_check(context, 1, 1, true); + auto complex = context.get_input(0); + + auto complex_type_mark = as_type_ptr(complex.get_node_shared_ptr()); + PYTORCH_OP_CONVERSION_CHECK(complex_type_mark, "aten::real operation expects complex type tensor on input."); + + return {complex_type_mark->input_value(0)}; +}; + +OutputVector translate_view_as_complex(const NodeContext& context) { + num_inputs_check(context, 1, 1); + auto complex = context.get_input(0); + + return {context.mark_node(std::make_shared(complex, complex.get_element_type()))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/fft.cpp b/src/frontends/pytorch/src/op/fft.cpp new file mode 100644 index 00000000000000..0c2eb17c49d305 --- /dev/null +++ b/src/frontends/pytorch/src/op/fft.cpp @@ -0,0 +1,208 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/complex_type_mark.hpp" +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/equal.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/irdft.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/rdft.hpp" +#include "openvino/op/reduce_prod.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/scatter_update.hpp" +#include "openvino/op/select.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/sqrt.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/subtract.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +using namespace ov::op; + +OutputVector translate_fft_rfftn(const NodeContext& context) { + // aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + num_inputs_check(context, 1, 4); + auto input = context.get_input(0); + + auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + auto const_0 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); + auto const_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1})); + + Output input_shape; + Output input_rank_scalar; + std::tie(input_shape, input_rank_scalar) = get_shape_rank(context, input, true); + + Output raw_s; + // Inputs can be either none or List. Check whether input values should be used or should be set to default values. + if (!context.input_is_none(1)) { + // s is provided, load from input. + raw_s = get_input_concat_if_list(context, 1); + raw_s = context.mark_node(std::make_shared(raw_s, element::i32)); + } + Output dim; + // Handle dim parameter containing vector of integers indicating dimensions to be transformed. + if (!context.input_is_none(2)) { + // dim is provided, load from input. + dim = get_input_concat_if_list(context, 2); + dim = context.mark_node(std::make_shared(dim, element::i32)); + } else if (!context.input_is_none(1)) { + // If dim is default and s is provided, use last s_len dimensions where s_len is length of s. 
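+        // e.g. a rank-3 input with s = (4, 8) gives dim = (1, 2), matching torch.fft.rfftn(x, s=(4, 8)).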
+ auto s_len = context.mark_node(std::make_shared(raw_s, element::i32)); + auto slice_start = context.mark_node(std::make_shared(input_rank_scalar, s_len)); + auto slice_start_scalar = context.mark_node(std::make_shared(slice_start)); + dim = context.mark_node( + std::make_shared(slice_start_scalar, input_rank_scalar, const_1, element::i32)); + } else { + // Dim and s are set to default, use all dimensions. + dim = context.mark_node(std::make_shared(const_0, input_rank_scalar, const_1, element::i32)); + } + + Output s; + if (context.input_is_none(1)) { + // Value for s was set to default, use full size for all dimensions. + s = context.mark_node(std::make_shared(input_shape, dim, const_0)); + } else { + // Values for s were provided. Replace -1 values with default full size in given dimension. + auto full_s_cond = context.mark_node(std::make_shared(raw_s, const_neg_1)); + auto full_s_values = context.mark_node(std::make_shared(input_shape, dim, const_0)); + s = context.mark_node(std::make_shared(full_s_cond, full_s_values, raw_s)); + } + + // Handle norm parameter indicating normalization mode to use. Defaults to "backward". + std::string norm = "backward"; + if (!context.input_is_none(3)) { + norm = context.const_input(3); + } + + auto rdft = context.mark_node(std::make_shared(input, dim, s)); + + // Apply normalizations. + auto n_int = context.mark_node(std::make_shared(s, const_0)); + auto n = context.mark_node(std::make_shared(n_int, rdft)); + Output normalized_rfftn; + if (norm == "forward") { + // Normalize by 1/n + normalized_rfftn = context.mark_node(std::make_shared(rdft, n)); + } else if (norm == "backward") { + // No normalization + normalized_rfftn = rdft; + } else if (norm == "ortho") { + // Normalize by 1/sqrt(n) + auto sqrt_n = context.mark_node(std::make_shared(n)); + normalized_rfftn = context.mark_node(std::make_shared(rdft, sqrt_n)); + } else { + FRONT_END_THROW( + "aten::fft_rfftn: unrecognized normalization mode. Only forward, backward and ortho are supported."); + } + + return {std::make_shared(normalized_rfftn, normalized_rfftn.get_element_type())}; +} + +OutputVector translate_fft_irfftn(const NodeContext& context) { + // aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + num_inputs_check(context, 1, 4, true); + auto input = context.get_input(0); + + auto complex_type_mark = as_type_ptr(input.get_node_shared_ptr()); + PYTORCH_OP_CONVERSION_CHECK(complex_type_mark, "aten::fft_irfftn operation expects complex type tensor on input."); + input = complex_type_mark->input_value(0); + + auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); + auto const_0 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); + auto const_scalar_0 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); + auto const_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {1})); + auto const_scalar_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1})); + auto const_2 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {2})); + + // Input shape of complex number (excluding dimension created by concatenation of real and imag) + auto complex_input_shape = get_complex_shape(context, input); + auto input_rank = context.mark_node(std::make_shared(complex_input_shape, element::i32)); + auto input_rank_scalar = context.mark_node(std::make_shared(input_rank)); + + Output raw_s; + // Inputs can be either none or List.
Check whether input values should be used or should be set to default values. + if (!context.input_is_none(1)) { + // s is provided, load from input. + raw_s = get_input_concat_if_list(context, 1); + raw_s = context.mark_node(std::make_shared(raw_s, element::i32)); + } + + // Handle dim parameter containing vector of integers indicating dimensions to be transformed. + Output dim; + if (!context.input_is_none(2)) { + // Dim values are provided, load from input. + dim = get_input_concat_if_list(context, 2); + dim = context.mark_node(std::make_shared(dim, element::i32)); + } else if (!context.input_is_none(1)) { + // If dim is default and s is provided, use last s_len dimensions where s_len is length of s. + auto s_len = context.mark_node(std::make_shared(raw_s, element::i32)); + auto range_start = context.mark_node(std::make_shared(input_rank, s_len)); + auto range_start_scalar = context.mark_node(std::make_shared(range_start)); + dim = context.mark_node( + std::make_shared(range_start_scalar, input_rank_scalar, const_scalar_1, element::i32)); + } else { + // Dim and s are set to default, use all dimensions. + dim = context.mark_node( + std::make_shared(const_scalar_0, input_rank_scalar, const_scalar_1, element::i32)); + } + + // Calculate default s values. Use the full available size except the last element, which is set to an even value in the last + // dimension: s[-1] = 2 * (complex_input_shape[dim[-1]] - 1) + auto default_s_raw = context.mark_node(std::make_shared(complex_input_shape, dim, const_0)); + auto last_s = context.mark_node(std::make_shared(default_s_raw, const_neg_1, const_0)); + auto last_s_m_1 = context.mark_node(std::make_shared(last_s, const_1)); + auto s_upd = context.mark_node(std::make_shared(last_s_m_1, const_2)); + auto s_shape = context.mark_node(std::make_shared(default_s_raw, element::i32)); + auto last_s_idx = context.mark_node(std::make_shared(s_shape, const_1)); + auto default_s = context.mark_node(std::make_shared(default_s_raw, last_s_idx, s_upd, const_0)); + + // Handle s parameter containing vector of integers indicating signal sizes for dimensions. + Output s; + if (!context.input_is_none(1)) { + // Values for s were provided. Replace -1 values with default full size in given dimension. + auto full_s_cond = context.mark_node(std::make_shared(raw_s, const_neg_1)); + s = context.mark_node(std::make_shared(full_s_cond, default_s, raw_s)); + } else { + // Value for s was set to default. + s = default_s; + } + + // Handle norm parameter indicating normalization mode to use. Defaults to "backward". + std::string norm = "backward"; + if (!context.input_is_none(3)) { + norm = context.const_input(3); + } + + auto irdft = context.mark_node(std::make_shared(input, dim, s)); + + // Apply normalizations. + auto n_int = context.mark_node(std::make_shared(s, const_0)); + auto n = context.mark_node(std::make_shared(n_int, irdft)); + Output normalized_irfftn; + if (norm == "forward") { + normalized_irfftn = context.mark_node(std::make_shared(irdft, n)); + } else if (norm == "backward") { + normalized_irfftn = irdft; + } else if (norm == "ortho") { + auto sqrt_n = context.mark_node(std::make_shared(n)); + normalized_irfftn = context.mark_node(std::make_shared(irdft, sqrt_n)); + } else { + FRONT_END_THROW( + "aten::fft_irfftn: unrecognized normalization mode.
Only forward, backward and ortho are supported."); + } + return {normalized_irfftn}; +} + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/permute.cpp b/src/frontends/pytorch/src/op/permute.cpp index 46016ca8ca16a0..c724e38b8077b2 100644 --- a/src/frontends/pytorch/src/op/permute.cpp +++ b/src/frontends/pytorch/src/op/permute.cpp @@ -3,7 +3,10 @@ // #include "openvino/core/validation_util.hpp" +#include "openvino/frontend/complex_type_mark.hpp" #include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/subtract.hpp" #include "openvino/op/transpose.hpp" #include "utils.hpp" @@ -12,17 +15,41 @@ namespace frontend { namespace pytorch { namespace op { +using namespace ov::op; + OutputVector translate_permute(const NodeContext& context) { - num_inputs_check(context, 2, 2); + num_inputs_check(context, 2, 2, true); auto data = context.get_input(0); auto order = get_input_concat_if_list(context, 1); - auto rank = std::get<1>(get_shape_rank(context, data)); - auto rank_converted = context.mark_node(std::make_shared(rank, order)); + + Output rank; + auto complex_type_mark = as_type_ptr(data.get_node_shared_ptr()); + if (complex_type_mark) { + data = complex_type_mark->input_value(0); + rank = std::get<1>(get_shape_rank(context, data)); + auto const_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1})); + rank = context.mark_node(std::make_shared(rank, const_1)); + } else { + rank = std::get<1>(get_shape_rank(context, data)); + } + + auto rank_converted = context.mark_node(std::make_shared(rank, order)); auto order_normalized = normalize_axis(context, order, rank_converted); + + if (complex_type_mark) { + auto to_concat = OutputVector{order_normalized, rank_converted}; + order_normalized = context.mark_node(std::make_shared(to_concat, 0)); + } + if (const auto order_const = ov::util::get_constant_from_source(order_normalized)) { order_normalized = order_const; } - return {context.mark_node(std::make_shared(data, order_normalized))}; + auto permute = context.mark_node(std::make_shared(data, order_normalized)); + if (complex_type_mark) { + const auto& complex_dtype = complex_type_mark->get_complex_part_type(); + permute = context.mark_node(std::make_shared(permute, complex_dtype)); + } + return {permute}; } } // namespace op diff --git a/src/frontends/pytorch/src/op/reshape.cpp b/src/frontends/pytorch/src/op/reshape.cpp index 7524d0e3c4aaf4..b9dcfc8d9afc4a 100644 --- a/src/frontends/pytorch/src/op/reshape.cpp +++ b/src/frontends/pytorch/src/op/reshape.cpp @@ -4,6 +4,7 @@ #include "openvino/op/reshape.hpp" +#include "openvino/frontend/complex_type_mark.hpp" #include "openvino/frontend/pytorch/node_context.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/squeeze.hpp" @@ -15,15 +16,34 @@ namespace frontend { namespace pytorch { namespace op { +using namespace ov::op; + OutputVector translate_reshape(const NodeContext& context) { // Translation is used by both aten::view and aten::reshape. // Schema: aten::view(Tensor input, int[] shape) -> Tensor // Schema: aten::reshape(Tensor input, int[] shape) -> Tensor // For shape parameter, int[] is converted into single dimensional Tensor. 
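+    // For a complex input, ComplexTypeMark stores the data as a real tensor with a trailing
+    // dimension of size 2 holding (real, imag) pairs, so the requested shape is extended with
+    // 2 below; e.g. reshaping a complex (2, 3, 4) tensor to (6, 4) keeps a real layout of (6, 4, 2).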
- num_inputs_check(context, 2, 2); + num_inputs_check(context, 2, 2, true); + auto tensor = context.get_input(0); auto shape = get_input_concat_if_list(context, 1); - auto reshape = std::make_shared(context.get_input(0), shape, false); - return {context.mark_node(reshape)}; + + auto complex_type_mark = as_type_ptr(tensor.get_node_shared_ptr()); + if (complex_type_mark) { + tensor = complex_type_mark->input_value(0); + auto const_2 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {2})); + const_2 = context.mark_node(std::make_shared(const_2, shape)); + + shape = context.mark_node(std::make_shared(OutputVector{shape, const_2}, 0)); + } + + auto reshape = context.mark_node(std::make_shared(tensor, shape, false)); + + if (complex_type_mark) { + const auto& complex_dtype = complex_type_mark->get_complex_part_type(); + return {context.mark_node(std::make_shared(reshape, complex_dtype))}; + } else { + return {reshape}; + } }; } // namespace op diff --git a/src/frontends/pytorch/src/op/size.cpp b/src/frontends/pytorch/src/op/size.cpp index d8f1ee28123c10..2eca5f2707e53d 100644 --- a/src/frontends/pytorch/src/op/size.cpp +++ b/src/frontends/pytorch/src/op/size.cpp @@ -2,10 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/frontend/complex_type_mark.hpp" #include "openvino/frontend/pytorch/node_context.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/gather.hpp" #include "openvino/op/shape_of.hpp" +#include "openvino/op/slice.hpp" #include "utils.hpp" namespace ov { @@ -16,10 +18,25 @@ namespace op { using namespace ov::op; OutputVector translate_size(const NodeContext& context) { - num_inputs_check(context, 1, 2); - auto shape = context.mark_node(std::make_shared(context.get_input(0), element::i64)); + num_inputs_check(context, 1, 2, true); + auto data = context.get_input(0); + Output shape; + + auto complex_type_mark = as_type_ptr(data.get_node_shared_ptr()); + if (complex_type_mark) { + data = complex_type_mark->input_value(0); + shape = context.mark_node(std::make_shared(data, element::i64)); + + auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); + auto stop = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); + auto step = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {1})); + shape = context.mark_node(std::make_shared(shape, zero, stop, step, zero)); + } else { + shape = context.mark_node(std::make_shared(data, element::i64)); + } + if (context.input_is_none(1)) { - return shape->outputs(); + return {shape}; } else { auto axis_0 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); return {context.mark_node(std::make_shared(shape, context.get_input(1), axis_0))}; diff --git a/src/frontends/pytorch/src/op/stft.cpp b/src/frontends/pytorch/src/op/stft.cpp index 8e478835fdcdd6..678f44dcbe1edf 100644 --- a/src/frontends/pytorch/src/op/stft.cpp +++ b/src/frontends/pytorch/src/op/stft.cpp @@ -4,6 +4,7 @@ #include "openvino/op/stft.hpp" +#include "openvino/frontend/complex_type_mark.hpp" #include "openvino/frontend/pytorch/node_context.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/constant.hpp" @@ -78,8 +79,6 @@ OutputVector translate_stft(const NodeContext& context) { if (!context.input_is_none(7)) { return_complex = context.const_input(7); } - PYTORCH_OP_CONVERSION_CHECK(!return_complex, - "aten::stft conversion is currently supported with return_complex=False only."); // Perform STFT constexpr bool transpose_frames = true; @@ -88,8 +87,10 @@ 
OutputVector translate_stft(const NodeContext& context) { if (normalized) { const auto nfft_convert = context.mark_node(std::make_shared(n_fft, stft)); const auto divisor = context.mark_node(std::make_shared(nfft_convert)); - const auto norm_stft = context.mark_node(std::make_shared(stft, divisor)); - return {norm_stft}; + stft = context.mark_node(std::make_shared(stft, divisor)); + } + if (return_complex) { + return {context.mark_node(std::make_shared(stft, stft->get_element_type()))}; } else { return {stft}; } diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index fe4e84bd47d45e..f00391e08e2a32 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -59,6 +59,7 @@ OP_CONVERTER(translate_celu); OP_CONVERTER(translate_channel_shuffle); OP_CONVERTER(translate_clamp); OP_CONVERTER(translate_col2im); +OP_CONVERTER(translate_complex); OP_CONVERTER(translate_constant); OP_CONVERTER(translate_conv_transposend); OP_CONVERTER(translate_convnd); @@ -86,6 +87,8 @@ OP_CONVERTER(translate_expm1); OP_CONVERTER(translate_eye); OP_CONVERTER(translate_fake_quantize_per_channel_affine); OP_CONVERTER(translate_fake_quantize_per_tensor_affine); +OP_CONVERTER(translate_fft_irfftn); +OP_CONVERTER(translate_fft_rfftn); OP_CONVERTER(translate_fill); OP_CONVERTER(translate_fill_diagonal); OP_CONVERTER(translate_flatten); @@ -108,6 +111,7 @@ OP_CONVERTER(translate_hann_window); OP_CONVERTER(translate_hardtanh); OP_CONVERTER(translate_if); OP_CONVERTER(translate_im2col); +OP_CONVERTER(translate_imag); OP_CONVERTER(translate_index); OP_CONVERTER(translate_index_add); OP_CONVERTER(translate_index_copy_); @@ -192,6 +196,7 @@ OP_CONVERTER(translate_randn); OP_CONVERTER(translate_randint); OP_CONVERTER(translate_rand_like); OP_CONVERTER(translate_randn_like); +OP_CONVERTER(translate_real); OP_CONVERTER(translate_reciprocal); OP_CONVERTER(translate_relu6); OP_CONVERTER(translate_remainder); @@ -246,6 +251,8 @@ OP_CONVERTER(translate_upsample_nearest3d); OP_CONVERTER(translate_upsample_trilinear3d); OP_CONVERTER(translate_var); OP_CONVERTER(translate_var_mean); +OP_CONVERTER(translate_view_as_complex); +OP_CONVERTER(translate_view_as_real); OP_CONVERTER(translate_weight_norm); OP_CONVERTER(translate_where); OP_CONVERTER(translate_zeros); @@ -423,7 +430,7 @@ const std::unordered_map get_supported_ops_ts() { {"aten::clip", op::translate_clamp}, {"aten::clone", op::skip_node}, // ignore clone operators that are inserted by PyTorch autograd {"aten::col2im", op::translate_col2im}, - // aten::complex - Supported in limited set of patterns + {"aten::complex", op::translate_complex}, {"aten::concat", op::translate_cat}, {"aten::contiguous", op::skip_node}, // In openvino how tensors are stored in memory is internal plugin detail, // we assume all tensors are contiguous @@ -468,8 +475,8 @@ const std::unordered_map get_supported_ops_ts() { {"aten::fake_quantize_per_channel_affine", op::translate_fake_quantize_per_channel_affine}, {"aten::fake_quantize_per_tensor_affine", op::translate_fake_quantize_per_tensor_affine}, {"aten::feature_dropout", op::skip_node}, - // aten::fft_irfftn - Supported in limited set of patterns - // aten::fft_rfftn - Supported in limited set of patterns + {"aten::fft_irfftn", op::translate_fft_irfftn}, + {"aten::fft_rfftn", op::translate_fft_rfftn}, {"aten::fill", op::translate_fill}, {"aten::fill_diagonal", op::translate_fill_diagonal}, {"aten::flatten", op::quantizable_op}, @@ -496,7 +503,7 @@ const std::unordered_map 
get_supported_ops_ts() { {"aten::hardswish", op::quantizable_op>}, {"aten::hardtanh", op::quantizable_op}, {"aten::im2col", op::translate_im2col}, - // aten::imag - Supported in limited set of patterns + {"aten::imag", op::translate_imag}, // aten::index - Supported in limited set of patterns {"aten::index_copy_", op::inplace_op}, {"aten::index_fill_", op::inplace_op}, @@ -604,7 +611,7 @@ const std::unordered_map get_supported_ops_ts() { {"aten::randint", op::translate_randint}, {"aten::randn", op::translate_randn}, {"aten::randn_like", op::translate_randn_like}, - // aten::real - Supported in limited set of patterns + {"aten::real", op::translate_real}, {"aten::reciprocal", op::optional_out}, {"aten::reciprocal_", op::inplace_op}, // aten::reflection_pad2d - Supported in limited set of patterns @@ -696,6 +703,8 @@ const std::unordered_map get_supported_ops_ts() { {"aten::var_mean", op::translate_var_mean}, {"aten::view", op::quantizable_op}, {"aten::view_as", op::translate_reshape_as}, + {"aten::view_as_complex", op::translate_view_as_complex}, + {"aten::view_as_real", op::translate_view_as_real}, {"aten::wait", op::skip_node}, {"aten::where", op::translate_where}, {"aten::zero", op::translate_zeros_like}, @@ -979,6 +988,8 @@ const std::unordered_map get_supported_ops_fx() { {"aten.var.correction", op::translate_var_fx}, {"aten.var_mean.correction", op::translate_var_mean_fx}, {"aten.view.default", op::translate_reshape}, + {"aten.view_as_complex.default", op::translate_view_as_complex}, + {"aten.view_as_real.default", op::translate_view_as_real}, {"aten.where.self", op::translate_where}, {"aten.zeros.default", op::translate_zeros_fx}, {"aten.zeros.names", op::translate_zeros_fx}, diff --git a/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.cpp b/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.cpp deleted file mode 100644 index cb80987e4511ae..00000000000000 --- a/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.cpp +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) 2018-2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "irfftn_complex_replacer.hpp" - -#include "openvino/core/rt_info.hpp" -#include "openvino/op/concat.hpp" -#include "openvino/op/convert.hpp" -#include "openvino/op/convert_like.hpp" -#include "openvino/op/equal.hpp" -#include "openvino/op/gather.hpp" -#include "openvino/op/irdft.hpp" -#include "openvino/op/multiply.hpp" -#include "openvino/op/range.hpp" -#include "openvino/op/reduce_prod.hpp" -#include "openvino/op/scatter_update.hpp" -#include "openvino/op/select.hpp" -#include "openvino/op/shape_of.hpp" -#include "openvino/op/sqrt.hpp" -#include "openvino/op/squeeze.hpp" -#include "openvino/op/subtract.hpp" -#include "openvino/op/unsqueeze.hpp" -#include "openvino/op/util/framework_node.hpp" -#include "openvino/pass/pattern/matcher.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" -#include "utils.hpp" - -namespace ov { -namespace frontend { -namespace pytorch { -namespace pass { - -using namespace ov::pass; -using namespace ov::op; - -IRFFTNComplexReplacer::IRFFTNComplexReplacer() { - // Transformation used to replace combination of aten::complex -> aten::fft_irfftn torch operators. - // Pattern: aten::complex -> aten::fft_irfftn - auto fft_op = pattern::wrap_type(); - - ov::matcher_pass_callback irfftn_callback = [](pattern::Matcher& m) { - // "aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor" - auto irfftn_op = cast_fw_node(m.get_match_root(), "aten::fft_irfftn"); - if (!irfftn_op) { - return false; - } - auto const_neg_1 = v0::Constant::create(element::i32, Shape{1}, {-1}); - auto const_0 = v0::Constant::create(element::i32, Shape{1}, {0}); - auto const_scalar_0 = v0::Constant::create(element::i32, Shape{}, {0}); - auto const_1 = v0::Constant::create(element::i32, Shape{1}, {1}); - auto const_scalar_1 = v0::Constant::create(element::i32, Shape{}, {1}); - auto const_2 = v0::Constant::create(element::i32, Shape{1}, {2}); - - // Check whether input node being aten::complex. - auto fw_node_complex_input = cast_fw_node(irfftn_op->input_value(0).get_node_shared_ptr(), "aten::complex"); - if (!fw_node_complex_input) { - return false; - } - - // Concatenate real and imag parts over additional, last dimension. - auto real = std::make_shared(fw_node_complex_input->input_value(0), const_neg_1); - auto imag = std::make_shared(fw_node_complex_input->input_value(1), const_neg_1); - NodeVector complex = {real, imag}; - auto input = std::make_shared(complex, -1); - - // Input shape of complex number (excluding dimension created by concatenation of real and imag) - auto complex_input_shape = std::make_shared(fw_node_complex_input->input_value(0), element::i32); - auto input_rank = std::make_shared(complex_input_shape, element::i32); - auto input_rank_scalar = std::make_shared(input_rank); - - // Inputs can be either none or ListConstruct. Check whether input values should be used or should be set to - // default values. - bool dim_use_default = is_none_node(irfftn_op->input_value(2)); - bool s_use_default = is_none_node(irfftn_op->input_value(1)); - // Can be None constant, when used check s_use_default. - auto raw_s_input_maybe = concat_list_construct(irfftn_op->input_value(1)); - raw_s_input_maybe = std::make_shared(raw_s_input_maybe, element::i32); - - // Handle dim parameter containing vector of integers indicating dimensions to be transformed. - std::shared_ptr dim; - if (!dim_use_default) { - // Dim values is provided, load from input. - dim = std::make_shared(concat_list_construct(irfftn_op->input_value(2)), element::i32); - } else if (!s_use_default) { - // If dim is default and s is provided, use last s_len dimensions where s_len is length of s. - auto s_len = std::make_shared(raw_s_input_maybe, element::i32); - auto range_start = std::make_shared(input_rank, s_len); - auto range_start_scalar = std::make_shared(range_start); - dim = std::make_shared(range_start_scalar, input_rank_scalar, const_scalar_1, element::i32); - } else { - // Dim and s are set to default, use all of dimensions. - dim = std::make_shared(const_scalar_0, input_rank_scalar, const_scalar_1, element::i32); - } - - // Calculate default s values. Use full available size except last element, which is set to even value in last - // dimension: s[-1] = 2 * (complex_input_shape[dim[-1]]) - auto default_s_raw = std::make_shared(complex_input_shape, dim, const_0); - auto last_s = std::make_shared(default_s_raw, const_neg_1, const_0); - auto last_s_m_1 = std::make_shared(last_s, const_1); - auto s_upd = std::make_shared(last_s_m_1, const_2); - auto s_shape = std::make_shared(default_s_raw, element::i32); - auto last_s_idx = std::make_shared(s_shape, const_1); - auto default_s = std::make_shared(default_s_raw, last_s_idx, s_upd, const_0); - - // Handle s parameter containing vector of intigers indicating signal sizes for dimensions. 
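For context, the default signal sizes computed here (and by the new translate_fft_irfftn above) assume an even-length last transformed dimension, s[-1] = 2 * (complex_input_shape[dim[-1]] - 1), which matches PyTorch's documented irfftn behavior. A quick check against PyTorch:

import torch

x = torch.randn(3, 10)
spec = torch.fft.rfftn(x)        # one-sided output: last dim becomes 10 // 2 + 1 = 6
out = torch.fft.irfftn(spec)     # default s[-1] = 2 * (6 - 1) = 10
assert out.shape == (3, 10)
assert torch.allclose(out, x, atol=1e-5)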
- std::shared_ptr s; - if (!s_use_default) { - // Values for s were provided. Replace -1 values with default full size in given dimension. - auto full_s_cond = std::make_shared(raw_s_input_maybe, const_neg_1); - s = std::make_shared(full_s_cond, default_s, raw_s_input_maybe); - } else { - // Value for s was set to default. - s = default_s; - } - - // Handle norm parameter indicating normalization mode to use. Defaults to "backward". - std::string norm; - if (const auto& fw_node_mode = - ov::as_type_ptr(irfftn_op->input_value(3).get_node_shared_ptr())) { - const auto& attrs = fw_node_mode->get_attrs(); - if (attrs.find("string_value") != attrs.end()) { - norm = attrs.at("string_value"); - } else { - norm = "backward"; - } - } else { - add_exception_to_fw_node(irfftn_op, "aten::fft_irfftn: could not retrive value for norm attribute."); - return false; - } - - auto irdft = std::make_shared(input, dim, s); - - // Apply normalizations. - auto n_int = std::make_shared(s, const_0); - auto n = std::make_shared(n_int, irdft); - std::shared_ptr normalized_irfftn; - if (norm == "forward") { - normalized_irfftn = std::make_shared(irdft, n); - } else if (norm == "backward") { - normalized_irfftn = irdft; - } else if (norm == "ortho") { - auto sqrt_n = std::make_shared(n); - normalized_irfftn = std::make_shared(irdft, sqrt_n); - } else { - add_exception_to_fw_node( - irfftn_op, - "aten::fft_irfftn: unrecognized normalization mode. Only forward, backward and ortho are supported."); - return false; - } - - copy_runtime_info({irfftn_op, fw_node_complex_input}, normalized_irfftn); - normalized_irfftn->set_friendly_name(irfftn_op->get_friendly_name()); - replace_node(irfftn_op, normalized_irfftn); - return true; - }; - auto m = std::make_shared(fft_op, "ov::frontend::pytorch::pass::IRFFTNComplexReplacer"); - this->register_matcher(m, irfftn_callback); -}; - -} // namespace pass -} // namespace pytorch -} // namespace frontend -} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.hpp b/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.hpp deleted file mode 100644 index c75c6e51f92571..00000000000000 --- a/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/pass/graph_rewrite.hpp" -#include "openvino/pass/pass.hpp" - -namespace ov { -namespace frontend { -namespace pytorch { -namespace pass { - -class IRFFTNComplexReplacer : public ov::pass::MatcherPass { -public: - OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::IRFFTNComplexReplacer"); - IRFFTNComplexReplacer(); -}; - -} // namespace pass -} // namespace pytorch -} // namespace frontend -} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.cpp b/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.cpp deleted file mode 100644 index b90e3121930c71..00000000000000 --- a/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.cpp +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright (C) 2018-2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "rfftn_complex_replacer.hpp" - -#include "openvino/core/rt_info.hpp" -#include "openvino/op/convert.hpp" -#include "openvino/op/convert_like.hpp" -#include "openvino/op/divide.hpp" -#include "openvino/op/equal.hpp" -#include "openvino/op/gather.hpp" -#include "openvino/op/range.hpp" -#include "openvino/op/rdft.hpp" -#include 
"openvino/op/reduce_prod.hpp" -#include "openvino/op/select.hpp" -#include "openvino/op/shape_of.hpp" -#include "openvino/op/slice.hpp" -#include "openvino/op/split.hpp" -#include "openvino/op/sqrt.hpp" -#include "openvino/op/squeeze.hpp" -#include "openvino/op/subtract.hpp" -#include "openvino/op/util/framework_node.hpp" -#include "openvino/pass/pattern/matcher.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" -#include "utils.hpp" - -namespace ov { -namespace frontend { -namespace pytorch { -namespace pass { - -using namespace ov::pass; -using namespace ov::op; - -RFFTNComplexReplacer::RFFTNComplexReplacer() { - // Transformation used to replace combination of aten::fft_rfftn -> {aten::real, aten::imag} torch operators. - // Pattern: aten::fft_rfftn -> {aten::real, aten::imag} - auto fft_op = pattern::wrap_type(); - ov::matcher_pass_callback rfftn_callback = [](pattern::Matcher& m) { - // Schema: "aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor" - auto rfftn_op = cast_fw_node(m.get_match_root(), "aten::fft_rfftn"); - if (!rfftn_op) { - return false; - } - auto const_neg_1 = v0::Constant::create(element::i32, Shape{}, {-1}); - auto const_0 = v0::Constant::create(element::i32, Shape{}, {0}); - auto const_1 = v0::Constant::create(element::i32, Shape{}, {1}); - - auto input = rfftn_op->input_value(0); - auto input_shape = std::make_shared(input, element::i32); - auto input_rank = std::make_shared(input_shape, element::i32); - auto input_rank_scalar = std::make_shared(input_rank); - - // Inputs can be either none or ListConstruct. Check whether input values should be used or should be set to - // default values. - bool dim_use_default = is_none_node(rfftn_op->input_value(2)); - bool s_use_default = is_none_node(rfftn_op->input_value(1)); - // Can be None constant, when used check s_use_default. - auto raw_s_input_maybe = concat_list_construct(rfftn_op->input_value(1)); - raw_s_input_maybe = std::make_shared(raw_s_input_maybe, element::i32); - - // Handle dim parameter containing vector of intigers indicating dimensions to be transformed. - std::shared_ptr dim; - if (!dim_use_default) { - // Dim values is provided, load from input. - dim = std::make_shared(concat_list_construct(rfftn_op->input_value(2)), element::i32); - } else if (!s_use_default) { - // If dim is default and s is provided, use last s_len dimensions where s_len is length of s. - auto s_len = std::make_shared(raw_s_input_maybe, element::i32); - auto slice_start = std::make_shared(input_rank, s_len); - auto slice_start_scalar = std::make_shared(slice_start); - dim = std::make_shared(slice_start_scalar, input_rank_scalar, const_1, element::i32); - } else { - // Dim and s are set to default, use all of dimensions. - dim = std::make_shared(const_0, input_rank_scalar, const_1, element::i32); - } - - // Handle s parameter containing vector of intigers indicating signal sizes for dimensions. - std::shared_ptr s; - if (!s_use_default) { - // Values for s were provided. Replace -1 values with default full size in given dimension. - auto full_s_cond = std::make_shared(raw_s_input_maybe, const_neg_1); - auto full_s_values = std::make_shared(input_shape, dim, const_0); - s = std::make_shared(full_s_cond, full_s_values, raw_s_input_maybe); - } else { - // Value for s was set to default, use full size for all dimensions. - s = std::make_shared(input_shape, dim, const_0); - } - - // Handle norm parameter indicating normalization mode to use. Defaults to "backward". 
-        std::string norm;
-        if (const auto& fw_node_mode =
-                ov::as_type_ptr<ov::op::util::FrameworkNode>(rfftn_op->input_value(3).get_node_shared_ptr())) {
-            const auto& attrs = fw_node_mode->get_attrs();
-            if (attrs.find("string_value") != attrs.end()) {
-                norm = attrs.at("string_value");
-            } else {
-                norm = "backward";
-            }
-        } else {
-            add_exception_to_fw_node(rfftn_op, "aten::fft_rfftn: could not retrieve value for norm attribute.");
-            return false;
-        }
-
-        auto rdft = std::make_shared<v9::RDFT>(input, dim, s);
-
-        // Apply normalizations.
-        auto n_int = std::make_shared<v1::ReduceProd>(s, const_0);
-        auto n = std::make_shared<v1::ConvertLike>(n_int, rdft);
-        std::shared_ptr<ov::Node> normalized_rfftn;
-        if (norm == "forward") {
-            // Normalize by 1/n.
-            normalized_rfftn = std::make_shared<v1::Divide>(rdft, n);
-        } else if (norm == "backward") {
-            // No normalization.
-            normalized_rfftn = rdft;
-        } else if (norm == "ortho") {
-            // Normalize by 1/sqrt(n).
-            auto sqrt_n = std::make_shared<v0::Sqrt>(n);
-            normalized_rfftn = std::make_shared<v1::Divide>(rdft, sqrt_n);
-        } else {
-            add_exception_to_fw_node(
-                rfftn_op,
-                "aten::fft_rfftn: unrecognized normalization mode. Only forward, backward and ortho are supported.");
-            return false;
-        }
-
-        // Replace outputs that are either torch operators aten::real or aten::imag. Apply squeeze to remove the last
-        // dimension used for concatenation.
-        auto normalized_rfftn_splitted = std::make_shared<v1::Split>(normalized_rfftn, const_neg_1, 2);
-        auto rfftn_outs = rfftn_op->get_users();
-        bool rval = false;
-        for (auto& out : rfftn_outs) {
-            if (auto real_op = cast_fw_node(out, "aten::real")) {
-                auto squeezed = std::make_shared<v0::Squeeze>(normalized_rfftn_splitted->output(0), const_neg_1);
-                copy_runtime_info({rfftn_op, real_op}, squeezed);
-                squeezed->set_friendly_name(real_op->get_friendly_name());
-                replace_node(real_op, squeezed);
-                rval = true;
-            }
-            if (auto imag_op = cast_fw_node(out, "aten::imag")) {
-                auto squeezed = std::make_shared<v0::Squeeze>(normalized_rfftn_splitted->output(1), const_neg_1);
-                copy_runtime_info({rfftn_op, imag_op}, squeezed);
-                squeezed->set_friendly_name(imag_op->get_friendly_name());
-                replace_node(imag_op, squeezed);
-                rval = true;
-            }
-        }
-        add_exception_to_fw_node(
-            rfftn_op,
-            "aten::fft_rfftn: Unsupported output node. Only aten::real and aten::imag are supported.");
-        return rval;
-    };
-
-    auto m = std::make_shared<pattern::Matcher>(fft_op, "ov::frontend::pytorch::pass::RFFTNComplexReplacer");
-    this->register_matcher(m, rfftn_callback);
-};
-
-}  // namespace pass
-}  // namespace pytorch
-}  // namespace frontend
-}  // namespace ov
diff --git a/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.hpp b/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.hpp
deleted file mode 100644
index 5420b7c9a01a04..00000000000000
--- a/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.hpp
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (C) 2018-2025 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "openvino/pass/graph_rewrite.hpp"
-#include "openvino/pass/pass.hpp"
-
-namespace ov {
-namespace frontend {
-namespace pytorch {
-namespace pass {
-
-class RFFTNComplexReplacer : public ov::pass::MatcherPass {
-public:
-    OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::RFFTNComplexReplacer");
-    RFFTNComplexReplacer();
-};
-
-}  // namespace pass
-}  // namespace pytorch
-}  // namespace frontend
-}  // namespace ov
diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp
index da0b5c5cd24d61..70ba4171770fbd 100644
--- a/src/frontends/pytorch/src/utils.cpp
+++ b/src/frontends/pytorch/src/utils.cpp
@@ -7,6 +7,7 @@
 #include "op_table.hpp"
 #include "openvino/core/rt_info.hpp"
 #include "openvino/core/validation_util.hpp"
+#include "openvino/frontend/complex_type_mark.hpp"
 #include "openvino/frontend/pytorch/decoder.hpp"
 #include "openvino/op/add.hpp"
 #include "openvino/op/broadcast.hpp"
@@ -40,15 +41,24 @@ namespace pytorch {
 
 using namespace ov::op;
 
-void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_inputs) {
+void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_inputs, bool allow_complex) {
     auto num_inputs = context.get_input_size();
     FRONT_END_OP_CONVERSION_CHECK(num_inputs >= min_inputs,
                                   "Got less inputs ",
                                   num_inputs,
                                   " than expected ",
                                   min_inputs);
+    if (!allow_complex) {
+        // Verify that no input is complex.
+        for (int i = 0; i < static_cast<int>(std::min(num_inputs, max_inputs)); ++i) {
+            auto input = context.get_input(i);
+            auto complex_type_mark = as_type_ptr<ComplexTypeMark>(input.get_node_shared_ptr());
+            PYTORCH_OP_CONVERSION_CHECK(!complex_type_mark, "The operation doesn't allow complex type.");
+        }
+    }
+
     // Check that additional inputs are all None, otherwise raise exception
     for (auto i = max_inputs; i < num_inputs; i++) {
-        FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(i), "Got more inputs than expected.");
+        FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(i), "Got more inputs than expected: ", i + 1);
     }
 }
 
@@ -836,6 +846,16 @@ bool index_tensor_on_list(ov::pass::NodeRegistry& rg,
     return true;
 }
 
+Output<Node> get_complex_shape(const NodeContext& context, const Output<Node>& complex_input) {
+    auto input_shape = context.mark_node(std::make_shared<v3::ShapeOf>(complex_input, element::i32));
+
+    auto zero = v0::Constant::create(element::i32, Shape{1}, {0});
+    auto stop = v0::Constant::create(element::i32, Shape{1}, {-1});
+    auto step = v0::Constant::create(element::i32, Shape{1}, {1});
+    // Removing the last dim from the shape
+    return context.mark_node(std::make_shared<v8::Slice>(input_shape, zero, stop, step, zero));
+}
+
 }  // namespace pytorch
 }  // namespace frontend
 }  // namespace ov
diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp
index 5eb3f4aa4f64c0..ece73b3ea86ea1 100644
--- a/src/frontends/pytorch/src/utils.hpp
+++ b/src/frontends/pytorch/src/utils.hpp
@@ -35,7 +35,7 @@ const std::string& get_pytorch_prefix();
     OPENVINO_ASSERT_HELPER(::ov::frontend::OpConversionFailure, "", (COND), get_pytorch_prefix(), __VA_ARGS__)
 #endif
 
-void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_inputs);
+void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_inputs, bool allow_complex = false);
 
 Output<Node> make_optional_bias(const Output<Node>& base_op,
                                 const NodeContext& context,
@@ -136,6 +136,8 @@ bool index_tensor_on_list(ov::pass::NodeRegistry& rg,
                           Output<Node>& new_output,
                           bool& use_input_as_output);
 
+Output<Node> get_complex_shape(const NodeContext& context, const Output<Node>& complex_input);
+
 namespace op {
 template <OutputVector (*T)(const NodeContext&), size_t idx = 0>
 OutputVector inplace_op(const NodeContext& context) {
diff --git a/tests/layer_tests/pytorch_tests/test_permute.py b/tests/layer_tests/pytorch_tests/test_permute.py
index d8fb94145bada7..efbd77d371eb89 100644
--- a/tests/layer_tests/pytorch_tests/test_permute.py
+++ b/tests/layer_tests/pytorch_tests/test_permute.py
@@ -11,46 +11,54 @@ def _prepare_input(self):
         import numpy as np
         return (np.random.randn(1, 3, 224, 224).astype(np.float32),)
 
-    def create_model(self, order):
+    def create_model(self, order, complex_type):
         import torch
 
         class aten_permute(torch.nn.Module):
-            def __init__(self, order):
-                super(aten_permute, self).__init__()
+            def __init__(self, order, complex_type):
+                super().__init__()
                 self.order = order
+                self.complex_type = complex_type
 
             def forward(self, x):
-                return torch.permute(x, self.order)
-
-        ref_net = None
-
-        return aten_permute(order), ref_net, "aten::permute"
-
-    @pytest.mark.parametrize("order", [[0, 2, 3, 1], [0, 3, 1, 2], [0, -1, 1, -2]])
+                if self.complex_type:
+                    x = torch.reshape(x, x.shape[:-1] + (-1, 2))
+                    x = torch.view_as_complex(x)
+                res = torch.permute(x, self.order)
+                if self.complex_type:
+                    res = torch.view_as_real(res)
+                return res
+
+        return aten_permute(order, complex_type), None, "aten::permute"
+
+    @pytest.mark.parametrize("order", [[0, 2, 3, 1],
+                                       [0, 3, 1, 2],
+                                       [0, -1, 1, -2]])
+    @pytest.mark.parametrize("complex_type", [True, False])
     @pytest.mark.nightly
     @pytest.mark.precommit
     @pytest.mark.precommit_torch_export
-    def test_permute(self, order, ie_device, precision, ir_version):
-        self._test(*self.create_model(order), ie_device, precision, ir_version)
+    def test_permute(self, order, complex_type, ie_device, precision, ir_version):
+        self._test(*self.create_model(order, complex_type), ie_device, precision, ir_version)
 
 
 class TestPermuteList(PytorchLayerTest):
     def _prepare_input(self, permute_shape):
         import numpy as np
-        return (np.random.randn(1, 3, 224, 224).astype(np.float32), np.random.randn(*permute_shape).astype(np.float32))
+        return (np.random.randn(1, 3, 224, 224).astype(np.float32),
+                np.random.randn(*permute_shape).astype(np.float32))
 
     def create_model(self):
         import torch
 
-        class aten_permute(torch.nn.Module):
-
+        class aten_permute_list(torch.nn.Module):
             def forward(self, x, y):
                 y_shape = y.shape
                 return torch.permute(x, [y_shape[0] - 1, y_shape[1] - 1, y_shape[2] - 1, y_shape[3] - 1])
 
         ref_net = None
 
-        return aten_permute(), ref_net, ["aten::permute", "prim::ListConstruct"]
+        return aten_permute_list(), ref_net, ["aten::permute", "prim::ListConstruct"]
 
     @pytest.mark.parametrize("order", [[1, 3, 4, 2], [1, 4, 2, 3]])
     @pytest.mark.nightly
     @pytest.mark.precommit
     @pytest.mark.precommit_torch_export
     def test_permute_list(self, order, ie_device, precision, ir_version):
self._test(*self.create_model(), ie_device, precision, ir_version, - kwargs_to_prepare_input={"permute_shape": order}, dynamic_shapes=ie_device != "GPU") + kwargs_to_prepare_input={"permute_shape": order}, + dynamic_shapes=ie_device != "GPU") diff --git a/tests/layer_tests/pytorch_tests/test_reshape.py b/tests/layer_tests/pytorch_tests/test_reshape.py index 7174d6022b4ca1..5266e8e00c5c1d 100644 --- a/tests/layer_tests/pytorch_tests/test_reshape.py +++ b/tests/layer_tests/pytorch_tests/test_reshape.py @@ -1,31 +1,38 @@ # Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import random import numpy as np import pytest -import random from pytorch_layer_test_class import PytorchLayerTest class TestReshape(PytorchLayerTest): - def _prepare_input(self): - return (np.random.uniform(0, 50, (1, 12, 12, 24)).astype(np.float32)) + def _prepare_input(self, complex_type): + shape = (1, 12, 12, 24) + if complex_type: + shape += (2,) + return (np.random.uniform(0, 50, shape).astype(np.float32)) - def create_model(self, shape): + def create_model(self, shape, complex_type): import torch class aten_reshape(torch.nn.Module): - def __init__(self, shape): - super(aten_reshape, self).__init__() + def __init__(self, shape, complex_type): + super().__init__() self.shape = shape + self.complex_type = complex_type def forward(self, x): - return torch.reshape(x, self.shape) + if self.complex_type: + x = torch.view_as_complex(x) + res = torch.reshape(x, self.shape) + if self.complex_type: + res = torch.view_as_real(res) + return res - ref_net = None - - return aten_reshape(shape), ref_net, "aten::reshape" + return aten_reshape(shape, complex_type), None, "aten::reshape" @pytest.mark.parametrize(("shape"), [ [-1, 6], @@ -37,16 +44,20 @@ def forward(self, x): [24, 1, -1, 12], [24, 1, 1, -1, 12], ]) + @pytest.mark.parametrize("complex_type", [True, False]) @pytest.mark.nightly @pytest.mark.precommit @pytest.mark.precommit_torch_export @pytest.mark.precommit_fx_backend - def test_reshape(self, shape, ie_device, precision, ir_version): - self._test(*self.create_model(shape), ie_device, precision, ir_version) + def test_reshape(self, shape, complex_type, ie_device, precision, ir_version): + self._test(*self.create_model(shape, complex_type), + ie_device, precision, ir_version, + kwargs_to_prepare_input={"complex_type": complex_type}) + class TestDynamicReshape(PytorchLayerTest): def _prepare_input(self): - last_dym = random.randint(1,2) + last_dym = random.randint(1, 2) return (np.random.uniform(0, 50, (1, 12, 12, 24)).astype(np.float32), last_dym) def create_model(self, shape): @@ -54,17 +65,14 @@ def create_model(self, shape): class aten_reshape(torch.nn.Module): def __init__(self, shape): - super(aten_reshape, self).__init__() + super().__init__() self.shape = shape def forward(self, x, dym): - #return torch.reshape(x, self.shape) dym2 = int(torch.ops.aten.sym_size(x, 3)/dym) return torch.reshape(x, [12, 12, dym2, dym]) - ref_net = None - - return aten_reshape(shape), ref_net, "aten::reshape" + return aten_reshape(shape), None, "aten::reshape" @pytest.mark.parametrize(("shape"), [ [12, 12, 24, 1], diff --git a/tests/layer_tests/pytorch_tests/test_size.py b/tests/layer_tests/pytorch_tests/test_size.py index 050d1d818df1b2..f3e0e98dccb327 100644 --- a/tests/layer_tests/pytorch_tests/test_size.py +++ b/tests/layer_tests/pytorch_tests/test_size.py @@ -7,24 +7,38 @@ class TestSize(PytorchLayerTest): - def _prepare_input(self, input_shape): + def _prepare_input(self, input_shape, complex_type): 
import numpy as np + if complex_type: + input_shape += [2] return (np.random.randn(*input_shape).astype(np.float32),) - def create_model(self): + def create_model(self, complex_type): import torch class aten_size(torch.nn.Module): + def __init__(self, complex_type): + super().__init__() + self.complex_type = complex_type + def forward(self, x): + if self.complex_type: + x = torch.view_as_complex(x) return torch.tensor(x.shape) - ref_net = None + op = aten_size(complex_type) - op = aten_size() + return op, None, "aten::size" - return op, ref_net, "aten::size" @pytest.mark.nightly @pytest.mark.precommit - @pytest.mark.parametrize("input_shape", [[1,], [1, 2], [1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4, 5]]) - def test_size(self, input_shape, ie_device, precision, ir_version): - self._test(*self.create_model(), ie_device, precision, ir_version, kwargs_to_prepare_input={"input_shape": input_shape}) + @pytest.mark.parametrize("input_shape", [[1,], + [1, 2], + [1, 2, 3], + [1, 2, 3, 4], + [1, 2, 3, 4, 5]]) + @pytest.mark.parametrize("complex_type", [True, False]) + def test_size(self, input_shape, complex_type, ie_device, precision, ir_version): + self._test(*self.create_model(complex_type), ie_device, precision, ir_version, + kwargs_to_prepare_input={"input_shape": input_shape, + "complex_type": complex_type}) diff --git a/tests/layer_tests/pytorch_tests/test_stft.py b/tests/layer_tests/pytorch_tests/test_stft.py index f90962e5f1daa7..a2097b1f1fe453 100644 --- a/tests/layer_tests/pytorch_tests/test_stft.py +++ b/tests/layer_tests/pytorch_tests/test_stft.py @@ -98,7 +98,7 @@ def __init__(self, n_fft, hop_length, win_length, center, pad_mode, normalized, self.return_complex = return_complex def forward(self, x): - return torch.stft( + stft = torch.stft( x, self.n_fft, hop_length=self.hop_length, @@ -110,6 +110,10 @@ def forward(self, x): onesided=self.onesided, return_complex=self.return_complex, ) + if self.return_complex: + return torch.view_as_real(stft) + else: + return stft ref_net = None @@ -128,9 +132,9 @@ def forward(self, x): [16, None, None, False, "reflect", False, True, False], # hop & win length None [16, 4, None, False, "reflect", False, True, False], # win_length None [16, 4, 16, False, "reflect", True, True, False], # normalized True + [16, 4, 16, False, "reflect", False, True, True], # return_complex True # Unsupported cases: [16, 4, 16, False, "reflect", False, False, False], # onesided False - [16, 4, 16, False, "reflect", False, True, True], # reutrn_complex True ]) def test_stft_not_supported_attrs(self, n_fft, hop_length, win_length, center, pad_mode, normalized, onesided, return_complex, ie_device, precision, ir_version, trace_model): if ie_device == "GPU": @@ -144,9 +148,5 @@ def test_stft_not_supported_attrs(self, n_fft, hop_length, win_length, center, p pytest.xfail( reason="aten::stft conversion is currently supported with onesided=True only") - if return_complex is True: - pytest.xfail( - reason="aten::stft conversion is currently supported with return_complex=False only") - self._test(*self.create_model_with_attrs(n_fft, hop_length, win_length, center, pad_mode, normalized, onesided, return_complex), ie_device, precision, ir_version, kwargs_to_prepare_input={}, trace_model=trace_model)
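
Note for reviewers: the updated tests rely on the torch.view_as_complex / torch.view_as_real round-trip to feed
complex tensors through real-valued test inputs. view_as_complex reinterprets a real tensor whose last dimension
has size 2 as a complex tensor (folding that dimension into the dtype), and view_as_real is its inverse; this is
also why the new get_complex_shape helper slices the last dimension off the real-view shape. A minimal standalone
sketch, using only public PyTorch API and independent of this patch:

    import torch

    x = torch.randn(1, 12, 12, 24, 2)   # real view: trailing dim of size 2 holds (real, imag) pairs
    c = torch.view_as_complex(x)        # complex view: shape (1, 12, 12, 24), dtype complex64
    assert c.shape == x.shape[:-1]      # the trailing 2 is folded into the complex dtype
    r = torch.view_as_real(c)           # inverse: back to the real view
    assert r.shape == x.shape and torch.equal(r, x)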
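
Note for reviewers: the norm handling in the replacers deleted above follows PyTorch's documented convention for
torch.fft: "backward" leaves the forward transform unscaled, "forward" scales it by 1/n, and "ortho" by 1/sqrt(n),
where n is the product of the transformed signal sizes. A quick numerical check, for illustration only:

    import torch

    x = torch.randn(4, 8)
    n = x.numel()                                # product of transformed sizes (all dims transformed here)
    base = torch.fft.rfftn(x)                    # norm="backward": unscaled forward transform
    fwd = torch.fft.rfftn(x, norm="forward")     # scaled by 1/n
    ortho = torch.fft.rfftn(x, norm="ortho")     # scaled by 1/sqrt(n)
    assert torch.allclose(fwd, base / n)
    assert torch.allclose(ortho, base / n ** 0.5)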