Skip to content

Commit

Permalink
[ONNX] Added SkipSimplifiedLayerNormalization from com.microsoft domain (#27285)
Browse files Browse the repository at this point in the history

Details:
Microsoft Contrib Operator "SkipSimplifiedLayerNormalization" for ONNX
RT

Tickets:
N/A

---------

Co-authored-by: Georgy Krivoruchko <[email protected]>
  • Loading branch information
vatsalashanubhag and gkrivor authored Nov 26, 2024
1 parent a88bf5a commit adde531
Show file tree
Hide file tree
Showing 3 changed files with 181 additions and 0 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "core/operator_set.hpp"
#include "exceptions.hpp"
#include "openvino/frontend/exception.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/divide.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/power.hpp"
#include "openvino/op/range.hpp"
#include "openvino/op/reduce_mean.hpp"
#include "openvino/op/reduce_sum.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/op/sqrt.hpp"
#include "utils/common.hpp"

using namespace ov::op;
using ::ONNX_NAMESPACE::TensorProto_DataType;

namespace ov {
namespace frontend {
namespace onnx {
namespace com_microsoft {
namespace opset_1 {

// Translates the com.microsoft SkipSimplifiedLayerNormalization contrib op:
//   sum = input + skip [+ bias]
//   out = sum / RMS(sum) * gamma, where RMS(x) = sqrt(mean(x^2) + epsilon)
// Inputs: input, skip, gamma (scale), optional bias.
// Outputs: [out, mean(sum^2), 1/RMS(sum), sum] (trailing outputs are optional downstream).
ov::OutputVector skip_simplified_layer_normalization(const ov::frontend::onnx::Node& node) {
    common::default_op_checks(node, 3);

    const auto inputs = node.get_ov_inputs();
    const auto input_tensor = inputs[0];
    const auto skip = inputs[1];

    CHECK_VALID_NODE(node,
                     input_tensor.get_element_type() == skip.get_element_type(),
                     "input tensor and skip must be of same type, got :",
                     input_tensor.get_element_type(),
                     skip.get_element_type());

    // input + skip
    std::shared_ptr<ov::Node> input = std::make_shared<v1::Add>(input_tensor, skip);

    // add bias if available (4th input is optional per the contrib-op spec)
    if (inputs.size() == 4) {
        const auto bias = inputs[3];
        CHECK_VALID_NODE(node,
                         input_tensor.get_element_type() == bias.get_element_type(),
                         "input tensor and bias must be of same type, got: ",
                         input_tensor.get_element_type(),
                         bias.get_element_type());
        input = std::make_shared<v1::Add>(input, bias);
    }

    // epsilon is an optional attribute; onnxruntime documents 1e-5 as its default,
    // so fall back to that instead of failing when the attribute is absent.
    const float epsilon = node.get_attribute_value<float>("epsilon", 1e-5f);
    const ov::element::Type element_type = input->get_output_element_type(0);

    // mean = (1/N) * Σ(j=1 to N) X_j^2, reduced over the last (hidden) axis; keep-dims
    // so the result broadcasts against the input below.
    auto squared_input = std::make_shared<v1::Multiply>(input, input);
    auto mean = std::make_shared<v1::ReduceMean>(squared_input, v0::Constant::create(element::i64, {}, {-1}), true);
    // RMS(X) = sqrt(mean + epsilon)
    auto rms_value =
        std::make_shared<v0::Sqrt>(std::make_shared<v1::Add>(mean, v0::Constant::create(element_type, {}, {epsilon})));
    auto inv_std_var = std::make_shared<v1::Divide>(v0::Constant::create(element_type, {}, {1.0f}), rms_value);
    auto normalized = std::make_shared<v1::Multiply>(input, inv_std_var);  // X / RMS(X)
    auto scaled = std::make_shared<v1::Multiply>(normalized, inputs[2]);   // (X / RMS(X)) * gamma

    return ov::OutputVector{scaled, mean, inv_std_var, input};
}
ONNX_OP("SkipSimplifiedLayerNormalization",
        OPSET_SINCE(1),
        com_microsoft::opset_1::skip_simplified_layer_normalization,
        MICROSOFT_DOMAIN);
} // namespace opset_1
} // namespace com_microsoft
} // namespace onnx
} // namespace frontend
} // namespace ov
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
ir_version: 6
producer_name: "OpenVINO ONNX Frontend"
graph {
node {
input: "input"
input: "skip"
input: "gamma"
input: "bias"
output: "out"
name: "SkipSimplifiedLayerNorm"
op_type: "SkipSimplifiedLayerNormalization"
attribute {
name: "epsilon"
f: 1e-05
type: FLOAT
}
domain: "com.microsoft"
}
initializer {
dims: 4
data_type: 1 # FLOAT32
name: "gamma"
float_data: 0.1
float_data: 0.2
float_data: 0.3
float_data: 0.4
}
initializer {
dims: 4
data_type: 1 # FLOAT32
name: "bias"
float_data: -0.07
float_data: -0.4
float_data: 0.22
float_data: 0.0
}
input {
name: "input"
type {
tensor_type {
elem_type: 1 # FLOAT32
shape {
dim { dim_value: 3 }
dim { dim_value: 2 }
dim { dim_value: 4 }
}
}
}
}
input {
name: "skip"
type {
tensor_type {
elem_type: 1 # FLOAT32
shape {
dim { dim_value: 3 }
dim { dim_value: 2 }
dim { dim_value: 4 }
}
}
}
}
output {
name: "out"
type {
tensor_type {
elem_type: 1 # FLOAT32
shape {
dim { dim_value: 3 }
dim { dim_value: 2 }
dim { dim_value: 4 }
}
}
}
}
}
opset_import {
domain: "com.microsoft"
version: 1
}
22 changes: 22 additions & 0 deletions src/frontends/onnx/tests/onnx_import_com_microsoft.in.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1357,6 +1357,28 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_com_microsoft_quickgelu) {
}
}

OPENVINO_TEST(${BACKEND_NAME}, onnx_model_skip_simplified_layer_normalization) {
    const auto model = convert_model("com.microsoft/skip_simplified_layer_normalization.onnx");
    auto test_case = ov::test::TestCase(model, s_device);

    // Both the "input" and "skip" tensors carry the same 3x2x4 ramp of values 1..24,
    // so a single buffer is fed twice.
    const std::vector<float> ramp = {1.f,  2.f,  3.f,  4.f,  5.f,  6.f,  7.f,  8.f,  9.f,  10.f, 11.f, 12.f,
                                     13.f, 14.f, 15.f, 16.f, 17.f, 18.f, 19.f, 20.f, 21.f, 22.f, 23.f, 24.f};
    // Reference values from onnxruntime for the model's gamma/bias initializers.
    const std::vector<float> reference = {
        0.0353291891515255f, 0.1317980140447617f, 0.3415765166282654f, 0.5857689380645752f, 0.07553060352802277f,
        0.1764662563800812f, 0.3244849443435669f, 0.4868034422397614f, 0.08510704338550568f, 0.1860678195953369f,
        0.3164101839065552f, 0.4556762874126434f, 0.08931596577167511f, 0.1901365667581558f, 0.3122786283493042f,
        0.4408963620662689f, 0.09167447686195374f, 0.19237320125103f, 0.3097965121269226f, 0.4322993457317352f,
        0.09318139404058456f, 0.1937852799892426f, 0.3081453144550323f, 0.4266831874847412f};

    test_case.add_input<float>(Shape{3, 2, 4}, ramp);  // input
    test_case.add_input<float>(Shape{3, 2, 4}, ramp);  // skip
    test_case.add_expected_output<float>({3, 2, 4}, reference);

    test_case.run();
}

OPENVINO_TEST(${BACKEND_NAME}, onnx_com_microsoft_simplified_layer_normalization_2x2x8) {
const auto model = convert_model("com.microsoft/simplified_layer_normalization_2x2x8.onnx");
auto test_case = ov::test::TestCase(model, s_device);
Expand Down

0 comments on commit adde531

Please sign in to comment.