@@ -34,6 +34,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
3434 const logging::Logger& logger) const {
3535 const auto & op_type = node.OpType ();
3636 const auto & input_defs = node.InputDefs ();
37+ const auto & output_defs = node.OutputDefs ();
3738 ORT_RETURN_IF_NOT (input_defs.size () >= 2 , op_type, " requires at least two inputs." );
3839
3940 emscripten::val input = model_builder.GetOperand (input_defs[0 ]->Name ());
@@ -45,7 +46,8 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
4546 options.set (" label" , node.Name ());
4647
4748 std::vector<int64_t > scale_shape;
48- ORT_RETURN_IF_NOT (GetShape (*input_defs[1 ], scale_shape, logger), " Cannot get scale shape" );
49+ const size_t scale_input_index = op_type == " SkipSimplifiedLayerNormalization" ? 2 : 1 ;
50+ ORT_RETURN_IF_NOT (GetShape (*input_defs[scale_input_index], scale_shape, logger), " Cannot get scale shape" );
4951 const auto scale_size = scale_shape.size ();
5052 // Except LayerNormalization, other normalization ops' scale input should be 1-D.
5153 if (op_type == " LayerNormalization" ) {
@@ -55,19 +57,17 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
5557 ORT_RETURN_IF_NOT (scale_size == 1 , " The scale size should be one." );
5658 }
5759
58- if (input_defs.size () >= 3 && !input_defs[2 ]->Name ().empty ()) {
60+ emscripten::val scale = model_builder.GetOperand (input_defs[scale_input_index]->Name ());
61+ options.set (" scale" , scale);
62+
63+ const size_t bias_input_index = op_type == " SkipSimplifiedLayerNormalization" ? 3 : 2 ;
64+ emscripten::val bias = emscripten::val::undefined ();
65+ if (TensorExists (input_defs, bias_input_index)) {
5966 // Bias input exists, and bias's shape should be the same as scale's shape.
6067 std::vector<int64_t > bias_shape;
61- ORT_RETURN_IF_NOT (GetShape (*input_defs[2 ], bias_shape, logger), " Cannot get bias shape" );
68+ ORT_RETURN_IF_NOT (GetShape (*input_defs[bias_input_index ], bias_shape, logger), " Cannot get bias shape" );
6269 ORT_RETURN_IF_NOT (bias_shape == scale_shape, " The bias' shape should be equal to scale's shape." );
63- }
64-
65- emscripten::val scale = model_builder.GetOperand (input_defs[1 ]->Name ());
66- options.set (" scale" , scale);
67-
68- if (input_defs.size () >= 3 && !input_defs[2 ]->Name ().empty ()) {
69- // Bias input exists, and bias's shape is the same as scale's shape.
70- emscripten::val bias = model_builder.GetOperand (input_defs[2 ]->Name ());
70+ bias = model_builder.GetOperand (input_defs[bias_input_index]->Name ());
7171 options.set (" bias" , bias);
7272 }
7373
@@ -76,6 +76,8 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
7676 options.set (" epsilon" , epsilon);
7777
7878 emscripten::val output = emscripten::val::undefined ();
79+ // SkipSimplifiedLayerNormalization's output: input_skip_bias_sum.
80+ emscripten::val input_skip_bias_sum = emscripten::val::undefined ();
7981 if (op_type == " BatchNormalization" ) {
8082 ORT_RETURN_IF_NOT (input_defs.size () == 5 , " BatchNormalization requires five inputs." );
8183 emscripten::val mean = model_builder.GetOperand (input_defs[3 ]->Name ());
@@ -85,7 +87,9 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
8587 }
8688
8789 output = model_builder.GetBuilder ().call <emscripten::val>(" batchNormalization" , input, mean, variance, options);
88- } else if (op_type == " LayerNormalization" || op_type == " SimplifiedLayerNormalization" ) {
90+ } else if (op_type == " LayerNormalization" ||
91+ op_type == " SimplifiedLayerNormalization" ||
92+ op_type == " SkipSimplifiedLayerNormalization" ) {
8993 int64_t axis = helper.Get (" axis" , -1 );
9094 axis = HandleNegativeAxis (axis, rank);
9195 std::vector<uint32_t > axes (rank - SafeInt<uint32_t >(axis));
@@ -94,13 +98,17 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
9498 if (op_type == " LayerNormalization" ) {
9599 options.set (" axes" , emscripten::val::array (axes));
96100 output = model_builder.GetBuilder ().call <emscripten::val>(" layerNormalization" , input, options);
97- } else { // SimplifiedLayerNormalization
101+ } else { // SimplifiedLayerNormalization or SkipSimplifiedLayerNormalization
98102 /* *
99- WebNN doesn't support SimplifiedLayerNormalization. So decompose it into a series of ops:
100- X --> Pow --> ReduceMean --> Add --> Sqrt --> Div -> Mul
101- ^ ^ ^ ^ ^
102- | | | | |
103- Y:2 axis B:epsilon A:X A:scale
103+ WebNN doesn't support SimplifiedLayerNormalization or SkipSimplifiedLayerNormalization.
104+ So decompose it into a series of ops:
105+ X --> Pow --> ReduceMean --> Add --> Sqrt --> Div -> Mul -> Add (optional)
106+ ^ ^ ^ ^ ^ ^
107+ | | | | | |
108+ Y:2 axis B:epsilon A:X A:scale B:bias
109+
110+ If it is SkipSimplifiedLayerNormalization and its output input_skip_bias_sum exists,
111+ input_skip_bias_sum = X + skip + bias (if it exists)
104112 */
105113
106114 int32_t input_type;
@@ -137,6 +145,25 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
137145 // Mul
138146 common_options.set (" label" , node.Name () + " _mul" );
139147 output = model_builder.GetBuilder ().call <emscripten::val>(" mul" , scale, div, common_options);
148+
149+ // Add (if bias exists)
150+ if (!bias.isUndefined ()) {
151+ common_options.set (" label" , node.Name () + " _add_bias" );
152+ output = model_builder.GetBuilder ().call <emscripten::val>(" add" , output, bias, common_options);
153+ }
154+
155+ // SkipSimplifiedLayerNormalization's output input_skip_bias_sum is the sum of input, skip, and bias.
156+ if (op_type == " SkipSimplifiedLayerNormalization" && TensorExists (output_defs, 3 )) {
157+ emscripten::val skip = model_builder.GetOperand (input_defs[1 ]->Name ());
158+ common_options.set (" label" , node.Name () + " _add_skip" );
159+ input_skip_bias_sum = model_builder.GetBuilder ().call <emscripten::val>(" add" , input, skip, common_options);
160+ if (!bias.isUndefined ()) {
161+ common_options.set (" label" , node.Name () + " _add_skip_bias" );
162+ input_skip_bias_sum = model_builder.GetBuilder ().call <emscripten::val>(
163+ " add" , input_skip_bias_sum, bias, common_options);
164+ }
165+ model_builder.AddOperand (output_defs[3 ]->Name (), std::move (input_skip_bias_sum));
166+ }
140167 }
141168 } else if (op_type == " InstanceNormalization" ) {
142169 // WebNN spec only supports 4D input for instanceNormalization.
@@ -188,7 +215,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
188215 } else {
189216 return ORT_MAKE_STATUS (ONNXRUNTIME, INVALID_ARGUMENT, " Unsupported normalization op: " , op_type);
190217 }
191- model_builder.AddOperand (node. OutputDefs () [0 ]->Name (), std::move (output));
218+ model_builder.AddOperand (output_defs [0 ]->Name (), std::move (output));
192219
193220 return Status::OK ();
194221}
@@ -215,9 +242,21 @@ bool NormalizationOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initi
215242 }
216243
217244 const auto & output_defs = node.OutputDefs ();
218- if (output_defs.size () != 1 ) {
219- LOGS (logger, VERBOSE) << op_type << " output count must be one." ;
220- return false ;
245+ if (op_type == " SkipSimplifiedLayerNormalization" ) {
246+ if (output_defs.size () > 4 ) {
247+ LOGS (logger, VERBOSE) << " SkipSimplifiedLayerNormalization output count must not exceed 4." ;
248+ return false ;
249+ }
250+ if (TensorExists (output_defs, 1 ) || TensorExists (output_defs, 2 )) {
251+ // Output mean and inv_std_var are used for training mode, which is not supported.
252+ LOGS (logger, VERBOSE) << " SkipSimplifiedLayerNormalization's output mean and inv_std_var are not supported." ;
253+ return false ;
254+ }
255+ } else {
256+ if (output_defs.size () != 1 ) {
257+ LOGS (logger, VERBOSE) << op_type << " output count must be one." ;
258+ return false ;
259+ }
221260 }
222261
223262 if (op_type == " BatchNormalization" && helper.Get (" training_mode" , 0 )) {
@@ -238,9 +277,9 @@ bool NormalizationOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet&
238277 int32_t input2_type; // B data type
239278 int32_t input3_type; // mean data type
240279 int32_t input4_type; // var data type
241- bool has_input2 = input_defs. size () > 2 && input_defs[ 2 ]-> Exists ( );
242- bool has_input3 = input_defs. size () > 3 && input_defs[ 3 ]-> Exists ( );
243- bool has_input4 = input_defs. size () > 3 && input_defs[ 4 ]-> Exists ( );
280+ bool has_input2 = TensorExists (input_defs, 2 );
281+ bool has_input3 = TensorExists (input_defs, 3 );
282+ bool has_input4 = TensorExists ( input_defs, 4 );
244283
245284 if (!GetType (*input_defs[0 ], input0_type, logger) ||
246285 !GetType (*input_defs[1 ], input1_type, logger) ||
@@ -277,6 +316,7 @@ void CreateNormalizationOpBuilder(const std::string& op_type, OpBuilderRegistrat
277316 " InstanceNormalization" ,
278317 " LayerNormalization" ,
279318 " SimplifiedLayerNormalization" ,
319+ " SkipSimplifiedLayerNormalization" ,
280320 };
281321
282322 op_registrations.builders .push_back (std::make_unique<NormalizationOpBuilder>());
0 commit comments